rebase: bump google.golang.org/grpc from 1.71.0 to 1.71.1

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.71.0 to 1.71.1.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.71.0...v1.71.1)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-version: 1.71.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2025-04-07 22:09:09 +00:00; committed by mergify[bot]
parent 0f572b62d1
commit f4cea29ca6
8 changed files with 104 additions and 88 deletions

go.mod

@@ -32,7 +32,7 @@ require (
golang.org/x/crypto v0.36.0
golang.org/x/net v0.37.0
golang.org/x/sys v0.31.0
google.golang.org/grpc v1.71.0
google.golang.org/grpc v1.71.1
google.golang.org/protobuf v1.36.6
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3

go.sum

@@ -1287,8 +1287,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


@@ -44,15 +44,19 @@ var (
//
// It implements the [resolver.Resolver] interface.
type delegatingResolver struct {
target resolver.Target // parsed target URI to be resolved
cc resolver.ClientConn // gRPC ClientConn
targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
proxyURL *url.URL // proxy URL, derived from proxy environment and target
target resolver.Target // parsed target URI to be resolved
cc resolver.ClientConn // gRPC ClientConn
proxyURL *url.URL // proxy URL, derived from proxy environment and target
mu sync.Mutex // protects all the fields below
targetResolverState *resolver.State // state of the target resolver
proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
// childMu serializes calls into child resolvers. It also protects access to
// the following fields.
childMu sync.Mutex
targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
}
// nopResolver is a resolver that does nothing.
@@ -111,6 +115,10 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
logger.Infof("Proxy URL detected : %s", r.proxyURL)
}
// Resolver updates from one child may trigger calls into the other. Block
// updates until the children are initialized.
r.childMu.Lock()
defer r.childMu.Unlock()
// When the scheme is 'dns' and target resolution on client is not enabled,
// resolution should be handled by the proxy, not the client. Therefore, we
// bypass the target resolver and store the unresolved target address.
@@ -165,11 +173,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol
}
func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
r.childMu.Lock()
defer r.childMu.Unlock()
r.targetResolver.ResolveNow(o)
r.proxyResolver.ResolveNow(o)
}
func (r *delegatingResolver) Close() {
r.childMu.Lock()
defer r.childMu.Unlock()
r.targetResolver.Close()
r.targetResolver = nil
@@ -267,11 +279,17 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro
err := r.updateClientConnStateLocked()
// Another possible approach was to block until updates are received from
// both resolvers. But this is not used because calling `New()` triggers
// `Build()` for the first resolver, which calls `UpdateState()`. And the
// second resolver hasn't sent an update yet, so it would cause `New()` to
// block indefinitely.
if err != nil {
r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
go func() {
r.childMu.Lock()
defer r.childMu.Unlock()
if r.targetResolver != nil {
r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
}
}()
}
return err
}
@@ -291,7 +309,13 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err
r.targetResolverState = &state
err := r.updateClientConnStateLocked()
if err != nil {
r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
go func() {
r.childMu.Lock()
defer r.childMu.Unlock()
if r.proxyResolver != nil {
r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
}
}()
}
return nil
}
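A note on the delegating resolver hunks above: the fix serializes every call into the child resolvers behind the new childMu, and re-resolution requests triggered from within a child's own update path are deferred to a goroutine that re-checks the child for nil (in case Close ran first). A minimal, hypothetical sketch of that pattern follows; the names and types are illustrative, not the grpc-go API.

```go
package example

import "sync"

// childResolver stands in for a child resolver owned by the parent.
type childResolver struct {
	resolveNow func()
}

// parentResolver sketches the delegating resolver's locking scheme: childMu
// serializes all calls into the child, and child is nil after Close.
type parentResolver struct {
	childMu sync.Mutex
	child   *childResolver
}

// onChildUpdate runs inside the child's update callback, possibly while the
// child holds its own locks, so re-resolution is requested asynchronously
// rather than by calling back into the child inline.
func (p *parentResolver) onChildUpdate(updateFailed bool) {
	if !updateFailed {
		return
	}
	go func() {
		p.childMu.Lock()
		defer p.childMu.Unlock()
		if p.child != nil {
			p.child.resolveNow()
		}
	}()
}

// Close drops the child under the same mutex, which is why the goroutine
// above must re-check p.child before using it.
func (p *parentResolver) Close() {
	p.childMu.Lock()
	defer p.childMu.Unlock()
	p.child = nil
}
```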


@@ -18,6 +18,12 @@
package resolver
import (
"encoding/base64"
"sort"
"strings"
)
type addressMapEntry struct {
addr Address
value any
@@ -137,66 +143,61 @@ func (a *AddressMap) Values() []any {
return ret
}
type endpointNode struct {
addrs map[string]struct{}
}
// Equal returns whether the unordered set of addrs are the same between the
// endpoint nodes.
func (en *endpointNode) Equal(en2 *endpointNode) bool {
if len(en.addrs) != len(en2.addrs) {
return false
}
for addr := range en.addrs {
if _, ok := en2.addrs[addr]; !ok {
return false
}
}
return true
}
func toEndpointNode(endpoint Endpoint) endpointNode {
en := make(map[string]struct{})
for _, addr := range endpoint.Addresses {
en[addr.Addr] = struct{}{}
}
return endpointNode{
addrs: en,
}
}
type endpointMapKey string
// EndpointMap is a map of endpoints to arbitrary values keyed on only the
// unordered set of address strings within an endpoint. This map is not thread
// safe, thus it is unsafe to access concurrently. Must be created via
// NewEndpointMap; do not construct directly.
type EndpointMap struct {
endpoints map[*endpointNode]any
endpoints map[endpointMapKey]endpointData
}
type endpointData struct {
// decodedKey stores the original key to avoid decoding when iterating on
// EndpointMap keys.
decodedKey Endpoint
value any
}
// NewEndpointMap creates a new EndpointMap.
func NewEndpointMap() *EndpointMap {
return &EndpointMap{
endpoints: make(map[*endpointNode]any),
endpoints: make(map[endpointMapKey]endpointData),
}
}
// encodeEndpoint returns a string that uniquely identifies the unordered set of
// addresses within an endpoint.
func encodeEndpoint(e Endpoint) endpointMapKey {
addrs := make([]string, 0, len(e.Addresses))
// base64 encoding the address strings restricts the characters present
// within the strings. This allows us to use a delimiter without the need of
// escape characters.
for _, addr := range e.Addresses {
addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
}
sort.Strings(addrs)
// " " should not appear in base64 encoded strings.
return endpointMapKey(strings.Join(addrs, " "))
}
// Get returns the value for the address in the map, if present.
func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
en := toEndpointNode(e)
if endpoint := em.find(en); endpoint != nil {
return em.endpoints[endpoint], true
val, found := em.endpoints[encodeEndpoint(e)]
if found {
return val.value, true
}
return nil, false
}
// Set updates or adds the value to the address in the map.
func (em *EndpointMap) Set(e Endpoint, value any) {
en := toEndpointNode(e)
if endpoint := em.find(en); endpoint != nil {
em.endpoints[endpoint] = value
return
en := encodeEndpoint(e)
em.endpoints[en] = endpointData{
decodedKey: Endpoint{Addresses: e.Addresses},
value: value,
}
em.endpoints[&en] = value
}
// Len returns the number of entries in the map.
@@ -211,12 +212,8 @@ func (em *EndpointMap) Len() int {
// used for EndpointMap accesses.
func (em *EndpointMap) Keys() []Endpoint {
ret := make([]Endpoint, 0, len(em.endpoints))
for en := range em.endpoints {
var endpoint Endpoint
for addr := range en.addrs {
endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
}
ret = append(ret, endpoint)
for _, en := range em.endpoints {
ret = append(ret, en.decodedKey)
}
return ret
}
@@ -225,27 +222,13 @@ func (em *EndpointMap) Keys() []Endpoint {
func (em *EndpointMap) Values() []any {
ret := make([]any, 0, len(em.endpoints))
for _, val := range em.endpoints {
ret = append(ret, val)
ret = append(ret, val.value)
}
return ret
}
// find returns a pointer to the endpoint node in em if the endpoint node is
// already present. If not found, nil is returned. The comparisons are done on
// the unordered set of addresses within an endpoint.
func (em EndpointMap) find(e endpointNode) *endpointNode {
for endpoint := range em.endpoints {
if e.Equal(endpoint) {
return endpoint
}
}
return nil
}
// Delete removes the specified endpoint from the map.
func (em *EndpointMap) Delete(e Endpoint) {
en := toEndpointNode(e)
if entry := em.find(en); entry != nil {
delete(em.endpoints, entry)
}
en := encodeEndpoint(e)
delete(em.endpoints, en)
}
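The EndpointMap rewrite above replaces the linear find() scan over *endpointNode keys with a plain map keyed by an order-independent string: each address is base64-encoded (so a space can serve as a delimiter without escaping), the encoded strings are sorted, and the result is joined. A small standalone sketch of that key scheme, mirroring the encodeEndpoint logic in the hunk (package and helper name are illustrative):

```go
package example

import (
	"encoding/base64"
	"sort"
	"strings"
)

// encodeAddrs builds an order-independent key for a set of address strings,
// the same idea as encodeEndpoint in the diff above.
func encodeAddrs(addrs []string) string {
	enc := make([]string, 0, len(addrs))
	for _, a := range addrs {
		// base64 restricts the output alphabet, so " " cannot appear in an
		// encoded address and is safe to use as the join delimiter.
		enc = append(enc, base64.StdEncoding.EncodeToString([]byte(a)))
	}
	sort.Strings(enc)
	return strings.Join(enc, " ")
}

// encodeAddrs([]string{"a:1", "b:2"}) and encodeAddrs([]string{"b:2", "a:1"})
// produce the same key, so Get/Set/Delete become single map lookups instead
// of a scan over every stored endpoint.
```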


@@ -134,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
return nil
}
if s.Endpoints == nil {
s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
for _, a := range s.Addresses {
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
ep.Addresses[0].BalancerAttributes = nil
s.Endpoints = append(s.Endpoints, ep)
}
s.Endpoints = addressesToEndpoints(s.Addresses)
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
@@ -172,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
ccr.cc.mu.Unlock()
return
}
s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
s := resolver.State{
Addresses: addrs,
ServiceConfig: ccr.curState.ServiceConfig,
Endpoints: addressesToEndpoints(addrs),
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
ccr.mu.Unlock()
@@ -210,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
}
channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
endpoints := make([]resolver.Endpoint, 0, len(addrs))
for _, a := range addrs {
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
ep.Addresses[0].BalancerAttributes = nil
endpoints = append(endpoints, ep)
}
return endpoints
}
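For reference, the new addressesToEndpoints helper converts each address into a one-address endpoint, moving the address's BalancerAttributes up to the endpoint; with it, both UpdateState and NewAddress now hand endpoint-aware balancers a populated Endpoints list even when a resolver only reports addresses. A self-contained sketch with stand-in types (not the real resolver package):

```go
package example

// attrs stands in for the attributes type used by the resolver package.
type attrs map[string]string

type Address struct {
	Addr               string
	BalancerAttributes attrs
}

type Endpoint struct {
	Addresses  []Address
	Attributes attrs
}

// addressesToEndpoints mirrors the helper added in the hunk above: each
// address becomes a single-address endpoint, and its BalancerAttributes are
// promoted to the endpoint and cleared on the address copy.
func addressesToEndpoints(addrs []Address) []Endpoint {
	endpoints := make([]Endpoint, 0, len(addrs))
	for _, a := range addrs {
		ep := Endpoint{Addresses: []Address{a}, Attributes: a.BalancerAttributes}
		ep.Addresses[0].BalancerAttributes = nil
		endpoints = append(endpoints, ep)
	}
	return endpoints
}
```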


@@ -870,13 +870,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
}
out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool)
// Read at most one byte more than the limit from the decompressor.
// Unless the limit is MaxInt64, in which case, that's impossible, so
// apply no limit.
if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
dcReader = io.LimitReader(dcReader, limit+1)
}
out, err := mem.ReadAll(dcReader, pool)
if err != nil {
out.Free()
return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
}
if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) {
if out.Len() > maxReceiveMessageSize {
out.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
}
@@ -885,12 +891,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
}
// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF.
func atEOF(dcReader io.Reader) bool {
n, err := dcReader.Read(make([]byte, 1))
return n == 0 && err == io.EOF
}
type recvCompressor interface {
RecvCompress() string
}
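The decompression hunks swap the post-hoc atEOF probe for a bounded read: the decompressor is wrapped in io.LimitReader with limit+1, so receiving even one byte past maxReceiveMessageSize is enough to return ResourceExhausted, except when the limit is MaxInt64, where no extra byte can be requested and no limit is applied. A minimal sketch of the same pattern outside grpc (function name and error text are illustrative):

```go
package example

import (
	"errors"
	"io"
	"math"
)

// readCapped reads at most limit bytes from r, but asks the reader for one
// extra byte so that an over-long payload can be detected without a separate
// probing Read for EOF.
func readCapped(r io.Reader, limit int) ([]byte, error) {
	if int64(limit) < math.MaxInt64 {
		r = io.LimitReader(r, int64(limit)+1)
	}
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if len(data) > limit {
		// More than limit bytes were produced, so the payload exceeds the cap.
		return nil, errors.New("payload larger than the configured limit")
	}
	return data, nil
}
```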


@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.71.0"
const Version = "1.71.1"

vendor/modules.txt

@@ -608,7 +608,7 @@ gomodules.xyz/jsonpatch/v2
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f
## explicit; go 1.22
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.71.0
# google.golang.org/grpc v1.71.1
## explicit; go 1.22.0
google.golang.org/grpc
google.golang.org/grpc/attributes