rebase: bump google.golang.org/grpc from 1.44.0 to 1.45.0

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.44.0 to 1.45.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.44.0...v1.45.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] authored 2022-03-14 20:09:40 +00:00, committed by mergify[bot]
parent c3e35f8849
commit 13dcc89ac8
18 changed files with 238 additions and 361 deletions

go.mod

@@ -24,7 +24,7 @@ require (
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
google.golang.org/grpc v1.44.0
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.27.1
k8s.io/api v0.23.4
k8s.io/apimachinery v0.23.4

go.sum

@@ -1619,8 +1619,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=


@@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- `make all` to test everything, OR
- `make vet` to catch vet errors
- `make test` to run the tests
- `make testrace` to run tests in race mode
- `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
- Exceptions to the rules can be made if there's a compelling reason for doing so.


@@ -79,7 +79,7 @@ var (
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
// errTransportCredsAndBundle indicates that creds bundle is used together
// with other individual Transport Credentials.
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
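
The new error message above points callers at the replacement for the deprecated grpc.WithInsecure(). A minimal sketch of that dial pattern, assuming a plaintext server at an illustrative localhost:50051 address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Explicitly disable transport security, as the updated error message
	// recommends, instead of relying on the deprecated grpc.WithInsecure().
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```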


@@ -70,3 +70,29 @@ type info struct {
func (info) AuthType() string {
return "insecure"
}
// insecureBundle implements an insecure bundle.
// An insecure bundle provides a thin wrapper around insecureTC to support
// the credentials.Bundle interface.
type insecureBundle struct{}
// NewBundle returns a bundle with disabled transport security and no per rpc credential.
func NewBundle() credentials.Bundle {
return insecureBundle{}
}
// NewWithMode returns a new insecure Bundle. The mode is ignored.
func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
return insecureBundle{}, nil
}
// PerRPCCredentials returns an nil implementation as insecure
// bundle does not support a per rpc credential.
func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
return nil
}
// TransportCredentials returns the underlying insecure transport credential.
func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
return NewCredentials()
}
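
The insecureBundle added here backs the exported insecure.NewBundle constructor. A hedged sketch of how such a bundle could be passed to a client via the experimental grpc.WithCredentialsBundle option; the address is illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The insecure bundle wraps the insecure transport credentials and
	// provides no per-RPC credentials, as described in the comments above.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithCredentialsBundle(insecure.NewBundle()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```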


@@ -72,9 +72,12 @@ type UnaryServerInfo struct {
}
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
// the status message of the RPC.
// execution of a unary RPC.
//
// If a UnaryHandler returns an error, it should either be produced by the
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
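
A hedged sketch of the contract described in the updated UnaryHandler comment, using plain placeholder request/response types rather than generated code:

```go
package example

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handler matches the UnaryHandler signature and follows the documented
// contract: return a status error or a context error; anything else is
// reported to the client as codes.Unknown with err.Error() as the message.
func handler(ctx context.Context, req interface{}) (interface{}, error) {
	name, ok := req.(string)
	if !ok || name == "" {
		// Produced by the status package: surfaces as codes.InvalidArgument.
		return nil, status.Error(codes.InvalidArgument, "name is required")
	}
	if err := ctx.Err(); err != nil {
		// A context error (Canceled or DeadlineExceeded): with this release
		// the server converts it via status.FromContextError instead of
		// falling back to codes.Unknown.
		return nil, err
	}
	return "hello, " + name, nil
}
```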


@@ -24,6 +24,7 @@
package channelz
import (
"context"
"fmt"
"sort"
"sync"
@@ -49,7 +50,8 @@ var (
// TurnOn turns on channelz data collection.
func TurnOn() {
if !IsOn() {
NewChannelzStorage()
db.set(newChannelMap())
idGen.reset()
atomic.StoreInt32(&curState, 1)
}
}
@@ -94,46 +96,40 @@ func (d *dbWrapper) get() *channelMap {
return d.DB
}
// NewChannelzStorage initializes channelz data storage and id generator.
// NewChannelzStorageForTesting initializes channelz data storage and id
// generator for testing purposes.
//
// This function returns a cleanup function to wait for all channelz state to be reset by the
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
// to remove some entity just register by the new test, since the id space is the same.
//
// Note: This function is exported for testing purpose only. User should not call
// it in most cases.
func NewChannelzStorage() (cleanup func() error) {
db.set(&channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
listenSockets: make(map[int64]*listenSocket),
normalSockets: make(map[int64]*normalSocket),
servers: make(map[int64]*server),
subChannels: make(map[int64]*subChannel),
})
// Returns a cleanup function to be invoked by the test, which waits for up to
// 10s for all channelz state to be reset by the grpc goroutines when those
// entities get closed. This cleanup function helps with ensuring that tests
// don't mess up each other.
func NewChannelzStorageForTesting() (cleanup func() error) {
db.set(newChannelMap())
idGen.reset()
return func() error {
var err error
cm := db.get()
if cm == nil {
return nil
}
for i := 0; i < 1000; i++ {
cm.mu.Lock()
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
cm.mu.Unlock()
// all things stored in the channelz map have been cleared.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for {
cm.mu.RLock()
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
cm.mu.RUnlock()
if err := ctx.Err(); err != nil {
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
}
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
return nil
}
cm.mu.Unlock()
time.Sleep(10 * time.Millisecond)
<-ticker.C
}
cm.mu.Lock()
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
cm.mu.Unlock()
return err
}
}
@@ -326,6 +322,17 @@ type channelMap struct {
normalSockets map[int64]*normalSocket
}
func newChannelMap() *channelMap {
return &channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
listenSockets: make(map[int64]*listenSocket),
normalSockets: make(map[int64]*normalSocket),
servers: make(map[int64]*server),
subChannels: make(map[int64]*subChannel),
}
}
func (c *channelMap) addServer(id int64, s *server) {
c.mu.Lock()
s.cm = c
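
For context, a sketch of the intended call pattern for the renamed helper. This is an internal package (google.golang.org/grpc/internal/channelz), importable only from inside the grpc module, and the test name here is hypothetical:

```go
package grpctest

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestWithChannelzStorage(t *testing.T) {
	// Fresh channelz storage for this test; the returned cleanup waits up to
	// 10s for all channelz entities to be removed and errors otherwise.
	cleanup := channelz.NewChannelzStorageForTesting()
	defer func() {
		if err := cleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... test body that creates and closes gRPC channels and servers ...
}
```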


@@ -26,13 +26,13 @@ import (
const (
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
// Do not use this and read from env directly. Its value is read and kept in
// variable BootstrapFileName.
// variable XDSBootstrapFileName.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
// XDSBootstrapFileContentEnv is the env variable to set bootstrapp file
// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
// content. Do not use this and read from env directly. Its value is read
// and kept in variable BootstrapFileName.
// and kept in variable XDSBootstrapFileContent.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
@@ -41,6 +41,7 @@ const (
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
@@ -82,7 +83,10 @@ var (
// which can be disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
// XDSOutlierDetection indicates whether outlier detection support is
// enabled, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")


@@ -38,11 +38,10 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
// ParseServiceConfig parses a JSON representation of the service config.
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
// parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case the
// there's difference in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool


@@ -741,6 +741,12 @@ func (e ConnectionError) Origin() error {
return e.err
}
// Unwrap returns the original error of this connection error or nil when the
// origin is nil.
func (e ConnectionError) Unwrap() error {
return e.err
}
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
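
The practical effect of adding Unwrap is that errors.Is and errors.As can now reach the wrapped origin of a ConnectionError. A self-contained analogue (connErr is an illustrative stand-in, not the transport type):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// connErr mirrors the shape of the change: a wrapper error that now exposes
// its origin via Unwrap, so errors.Is / errors.As can reach it.
type connErr struct {
	desc string
	err  error
}

func (e connErr) Error() string { return fmt.Sprintf("connection error: desc = %q", e.desc) }
func (e connErr) Unwrap() error { return e.err }

func main() {
	err := connErr{desc: "write: broken pipe", err: os.ErrDeadlineExceeded}
	// Before Unwrap existed, this check could not see through the wrapper.
	fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true
}
```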


@@ -37,21 +37,17 @@ To register server reflection on a gRPC server:
package reflection // import "google.golang.org/grpc/reflection"
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"reflect"
"sort"
"sync"
"github.com/golang/protobuf/proto"
dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// GRPCServer is the interface provided by a gRPC server. It is implemented by
@@ -59,339 +55,174 @@ import (
// as a registry, for accumulating the services exposed by the server.
type GRPCServer interface {
grpc.ServiceRegistrar
GetServiceInfo() map[string]grpc.ServiceInfo
ServiceInfoProvider
}
var _ GRPCServer = (*grpc.Server)(nil)
type serverReflectionServer struct {
rpb.UnimplementedServerReflectionServer
s GRPCServer
initSymbols sync.Once
serviceNames []string
symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files
}
// Register registers the server reflection service on the given gRPC server.
func Register(s GRPCServer) {
rpb.RegisterServerReflectionServer(s, &serverReflectionServer{
s: s,
})
svr := NewServer(ServerOptions{Services: s})
rpb.RegisterServerReflectionServer(s, svr)
}
// protoMessage is used for type assertion on proto messages.
// Generated proto message implements function Descriptor(), but Descriptor()
// is not part of interface proto.Message. This interface is needed to
// call Descriptor().
type protoMessage interface {
Descriptor() ([]byte, []int)
// ServiceInfoProvider is an interface used to retrieve metadata about the
// services to expose.
//
// The reflection service is only interested in the service names, but the
// signature is this way so that *grpc.Server implements it. So it is okay
// for a custom implementation to return zero values for the
// grpc.ServiceInfo values in the map.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ServiceInfoProvider interface {
GetServiceInfo() map[string]grpc.ServiceInfo
}
func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) {
s.initSymbols.Do(func() {
serviceInfo := s.s.GetServiceInfo()
s.symbols = map[string]*dpb.FileDescriptorProto{}
s.serviceNames = make([]string, 0, len(serviceInfo))
processed := map[string]struct{}{}
for svc, info := range serviceInfo {
s.serviceNames = append(s.serviceNames, svc)
fdenc, ok := parseMetadata(info.Metadata)
if !ok {
continue
}
fd, err := decodeFileDesc(fdenc)
if err != nil {
continue
}
s.processFile(fd, processed)
}
sort.Strings(s.serviceNames)
})
return s.serviceNames, s.symbols
// ExtensionResolver is the interface used to query details about extensions.
// This interface is satisfied by protoregistry.GlobalTypes.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ExtensionResolver interface {
protoregistry.ExtensionTypeResolver
RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
}
func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) {
filename := fd.GetName()
if _, ok := processed[filename]; ok {
return
// ServerOptions represents the options used to construct a reflection server.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ServerOptions struct {
// The source of advertised RPC services. If not specified, the reflection
// server will report an empty list when asked to list services.
//
// This value will typically be a *grpc.Server. But the set of advertised
// services can be customized by wrapping a *grpc.Server or using an
// alternate implementation that returns a custom set of service names.
Services ServiceInfoProvider
// Optional resolver used to load descriptors. If not specified,
// protoregistry.GlobalFiles will be used.
DescriptorResolver protodesc.Resolver
// Optional resolver used to query for known extensions. If not specified,
// protoregistry.GlobalTypes will be used.
ExtensionResolver ExtensionResolver
}
processed[filename] = struct{}{}
prefix := fd.GetPackage()
for _, msg := range fd.MessageType {
s.processMessage(fd, prefix, msg)
// NewServer returns a reflection server implementation using the given options.
// This can be used to customize behavior of the reflection service. Most usages
// should prefer to use Register instead.
//
// Experimental
//
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func NewServer(opts ServerOptions) rpb.ServerReflectionServer {
if opts.DescriptorResolver == nil {
opts.DescriptorResolver = protoregistry.GlobalFiles
}
for _, en := range fd.EnumType {
s.processEnum(fd, prefix, en)
if opts.ExtensionResolver == nil {
opts.ExtensionResolver = protoregistry.GlobalTypes
}
for _, ext := range fd.Extension {
s.processField(fd, prefix, ext)
}
for _, svc := range fd.Service {
svcName := fqn(prefix, svc.GetName())
s.symbols[svcName] = fd
for _, meth := range svc.Method {
name := fqn(svcName, meth.GetName())
s.symbols[name] = fd
return &serverReflectionServer{
s: opts.Services,
descResolver: opts.DescriptorResolver,
extResolver: opts.ExtensionResolver,
}
}
for _, dep := range fd.Dependency {
fdenc := proto.FileDescriptor(dep)
fdDep, err := decodeFileDesc(fdenc)
if err != nil {
continue
}
s.processFile(fdDep, processed)
}
}
func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) {
msgName := fqn(prefix, msg.GetName())
s.symbols[msgName] = fd
for _, nested := range msg.NestedType {
s.processMessage(fd, msgName, nested)
}
for _, en := range msg.EnumType {
s.processEnum(fd, msgName, en)
}
for _, ext := range msg.Extension {
s.processField(fd, msgName, ext)
}
for _, fld := range msg.Field {
s.processField(fd, msgName, fld)
}
for _, oneof := range msg.OneofDecl {
oneofName := fqn(msgName, oneof.GetName())
s.symbols[oneofName] = fd
}
}
func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) {
enName := fqn(prefix, en.GetName())
s.symbols[enName] = fd
for _, val := range en.Value {
valName := fqn(enName, val.GetName())
s.symbols[valName] = fd
}
}
func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) {
fldName := fqn(prefix, fld.GetName())
s.symbols[fldName] = fd
}
func fqn(prefix, name string) string {
if prefix == "" {
return name
}
return prefix + "." + name
}
// fileDescForType gets the file descriptor for the given type.
// The given type should be a proto message.
func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) {
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage)
if !ok {
return nil, fmt.Errorf("failed to create message from type: %v", st)
}
enc, _ := m.Descriptor()
return decodeFileDesc(enc)
}
// decodeFileDesc does decompression and unmarshalling on the given
// file descriptor byte slice.
func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
raw, err := decompress(enc)
if err != nil {
return nil, fmt.Errorf("failed to decompress enc: %v", err)
}
fd := new(dpb.FileDescriptorProto)
if err := proto.Unmarshal(raw, fd); err != nil {
return nil, fmt.Errorf("bad descriptor: %v", err)
}
return fd, nil
}
// decompress does gzip decompression.
func decompress(b []byte) ([]byte, error) {
r, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
}
out, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
}
return out, nil
}
func typeForName(name string) (reflect.Type, error) {
pt := proto.MessageType(name)
if pt == nil {
return nil, fmt.Errorf("unknown type: %q", name)
}
st := pt.Elem()
return st, nil
}
func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to create message from type: %v", st)
}
var extDesc *proto.ExtensionDesc
for id, desc := range proto.RegisteredExtensions(m) {
if id == ext {
extDesc = desc
break
}
}
if extDesc == nil {
return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext)
}
return decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
}
func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) {
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to create message from type: %v", st)
}
exts := proto.RegisteredExtensions(m)
out := make([]int32, 0, len(exts))
for id := range exts {
out = append(out, id)
}
return out, nil
type serverReflectionServer struct {
rpb.UnimplementedServerReflectionServer
s ServiceInfoProvider
descResolver protodesc.Resolver
extResolver ExtensionResolver
}
// fileDescWithDependencies returns a slice of serialized fileDescriptors in
// wire format ([]byte). The fileDescriptors will include fd and all the
// transitive dependencies of fd with names not in sentFileDescriptors.
func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) {
r := [][]byte{}
queue := []*dpb.FileDescriptorProto{fd}
func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) {
var r [][]byte
queue := []protoreflect.FileDescriptor{fd}
for len(queue) > 0 {
currentfd := queue[0]
queue = queue[1:]
if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent {
sentFileDescriptors[currentfd.GetName()] = true
currentfdEncoded, err := proto.Marshal(currentfd)
if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent {
sentFileDescriptors[currentfd.Path()] = true
fdProto := protodesc.ToFileDescriptorProto(currentfd)
currentfdEncoded, err := proto.Marshal(fdProto)
if err != nil {
return nil, err
}
r = append(r, currentfdEncoded)
}
for _, dep := range currentfd.Dependency {
fdenc := proto.FileDescriptor(dep)
fdDep, err := decodeFileDesc(fdenc)
if err != nil {
continue
}
queue = append(queue, fdDep)
for i := 0; i < currentfd.Imports().Len(); i++ {
queue = append(queue, currentfd.Imports().Get(i))
}
}
return r, nil
}
// fileDescEncodingByFilename finds the file descriptor for given filename,
// finds all of its previously unsent transitive dependencies, does marshalling
// on them, and returns the marshalled result.
func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) {
enc := proto.FileDescriptor(name)
if enc == nil {
return nil, fmt.Errorf("unknown file: %v", name)
}
fd, err := decodeFileDesc(enc)
if err != nil {
return nil, err
}
return fileDescWithDependencies(fd, sentFileDescriptors)
}
// parseMetadata finds the file descriptor bytes specified meta.
// For SupportPackageIsVersion4, m is the name of the proto file, we
// call proto.FileDescriptor to get the byte slice.
// For SupportPackageIsVersion3, m is a byte slice itself.
func parseMetadata(meta interface{}) ([]byte, bool) {
// Check if meta is the file name.
if fileNameForMeta, ok := meta.(string); ok {
return proto.FileDescriptor(fileNameForMeta), true
}
// Check if meta is the byte slice.
if enc, ok := meta.([]byte); ok {
return enc, true
}
return nil, false
}
// fileDescEncodingContainingSymbol finds the file descriptor containing the
// given symbol, finds all of its previously unsent transitive dependencies,
// does marshalling on them, and returns the marshalled result. The given symbol
// can be a type, a service or a method.
func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) {
_, symbols := s.getSymbols()
fd := symbols[name]
if fd == nil {
// Check if it's a type name that was not present in the
// transitive dependencies of the registered services.
if st, err := typeForName(name); err == nil {
fd, err = s.fileDescForType(st)
d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name))
if err != nil {
return nil, err
}
}
}
if fd == nil {
return nil, fmt.Errorf("unknown symbol: %v", name)
}
return fileDescWithDependencies(fd, sentFileDescriptors)
return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors)
}
// fileDescEncodingContainingExtension finds the file descriptor containing
// given extension, finds all of its previously unsent transitive dependencies,
// does marshalling on them, and returns the marshalled result.
func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) {
st, err := typeForName(typeName)
xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum))
if err != nil {
return nil, err
}
fd, err := fileDescContainingExtension(st, extNum)
if err != nil {
return nil, err
}
return fileDescWithDependencies(fd, sentFileDescriptors)
return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors)
}
// allExtensionNumbersForTypeName returns all extension numbers for the given type.
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) {
st, err := typeForName(name)
if err != nil {
var numbers []int32
s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool {
numbers = append(numbers, int32(xt.TypeDescriptor().Number()))
return true
})
sort.Slice(numbers, func(i, j int) bool {
return numbers[i] < numbers[j]
})
if len(numbers) == 0 {
// maybe return an error if given type name is not known
if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil {
return nil, err
}
extNums, err := s.allExtensionNumbersForType(st)
if err != nil {
return nil, err
}
return extNums, nil
return numbers, nil
}
// listServices returns the names of services this server exposes.
func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse {
serviceInfo := s.s.GetServiceInfo()
resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo))
for svc := range serviceInfo {
resp = append(resp, &rpb.ServiceResponse{Name: svc})
}
sort.Slice(resp, func(i, j int) bool {
return resp[i].Name < resp[j].Name
})
return resp
}
// ServerReflectionInfo is the reflection service handler.
@@ -412,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio
}
switch req := in.MessageRequest.(type) {
case *rpb.ServerReflectionRequest_FileByFilename:
b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors)
var b [][]byte
fd, err := s.descResolver.FindFileByPath(req.FileByFilename)
if err == nil {
b, err = s.fileDescWithDependencies(fd, sentFileDescriptors)
}
if err != nil {
out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
ErrorResponse: &rpb.ErrorResponse{
@@ -473,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio
}
}
case *rpb.ServerReflectionRequest_ListServices:
svcNames, _ := s.getSymbols()
serviceResponses := make([]*rpb.ServiceResponse, len(svcNames))
for i, n := range svcNames {
serviceResponses[i] = &rpb.ServiceResponse{
Name: n,
}
}
out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{
ListServicesResponse: &rpb.ListServiceResponse{
Service: serviceResponses,
Service: s.listServices(),
},
}
default:
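
Callers of the reflection package are unaffected by this rewrite: Register keeps its signature and now delegates to the new NewServer constructor. A minimal sketch of a server that enables reflection; the listen address is illustrative:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	// Register is unchanged for callers; internally it now builds the handler
	// via NewServer(ServerOptions{Services: s}). Custom descriptor or
	// extension resolvers can be supplied through ServerOptions instead.
	reflection.Register(s)

	// ... register application services here ...

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```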


@@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH}
mkdir -p ${GOBIN}
echo "remove existing generated files"
# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
# grpc_testing_not_regenerate/*.pb.go is not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
@@ -117,9 +117,9 @@ done
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
# grpc_testing_not_regenerate/*.pb.go are not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
# grpc/service_config/service_config.proto does not have a go_package option.
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config


@@ -1283,9 +1283,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
// Convert appErr if it is not a grpc status error.
appErr = status.Error(codes.Unknown, appErr.Error())
appStatus, _ = status.FromError(appErr)
// Convert non-status application error to a status error with code
// Unknown, but handle context errors specifically.
appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
@@ -1549,7 +1550,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
appStatus = status.New(codes.Unknown, appErr.Error())
// Convert non-status application error to a status error with code
// Unknown, but handle context errors specifically.
appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
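
Both handler paths now route non-status errors through status.FromContextError, so a handler that returns context.DeadlineExceeded or context.Canceled surfaces the matching code instead of codes.Unknown. A small sketch of that mapping (codeFor is an illustrative helper, not part of the library):

```go
package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// codeFor mirrors the server-side conversion: status errors keep their code,
// anything else goes through status.FromContextError, which maps
// context.Canceled and context.DeadlineExceeded to their codes and everything
// else to codes.Unknown.
func codeFor(appErr error) codes.Code {
	if s, ok := status.FromError(appErr); ok {
		return s.Code()
	}
	return status.FromContextError(appErr).Code()
}

// codeFor(context.DeadlineExceeded) == codes.DeadlineExceeded
// codeFor(context.Canceled)         == codes.Canceled
// codeFor(errors.New("boom"))       == codes.Unknown
```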


@@ -218,7 +218,7 @@ type jsonSC struct {
}
func init() {
internal.ParseServiceConfigForTesting = parseServiceConfig
internal.ParseServiceConfig = parseServiceConfig
}
func parseServiceConfig(js string) *serviceconfig.ParseResult {
if len(js) == 0 {


@@ -46,10 +46,12 @@ import (
)
// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC. If a StreamHandler returns an error, it
// should be produced by the status package, or else gRPC will use
// codes.Unknown as the status code and err.Error() as the status message
// of the RPC.
// execution of a streaming RPC.
//
// If a StreamHandler returns an error, it should either be produced by the
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification. Used


@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.44.1-dev"
const Version = "1.45.0"


@@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
go mod tidy
git status --porcelain 2>&1 | fail_on_output || \

vendor/modules.txt

@@ -590,7 +590,7 @@ google.golang.org/appengine/urlfetch
google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.44.0
# google.golang.org/grpc v1.45.0
## explicit; go 1.14
google.golang.org/grpc
google.golang.org/grpc/attributes