Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

rebase: bump google.golang.org/grpc from 1.59.0 to 1.60.1

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.59.0 to 1.60.1.

- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.59.0...v1.60.1)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by: mergify[bot]
parent c807059618
commit 0ec64b7552
vendor/google.golang.org/grpc/server.go (generated, vendored): 235 changed lines
@@ -70,6 +70,10 @@ func init() {
 	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
 		return srv.opts.creds
 	}
+	internal.IsRegisteredMethod = func(srv *Server, method string) bool {
+		return srv.isRegisteredMethod(method)
+	}
+	internal.ServerFromContext = serverFromContext
 	internal.DrainServerTransports = func(srv *Server, addr string) {
 		srv.drainServerTransports(addr)
 	}
@@ -81,6 +85,7 @@ func init() {
 	}
 	internal.BinaryLogger = binaryLogger
 	internal.JoinServerOptions = newJoinServerOption
+	internal.RecvBufferPool = recvBufferPool
 }
 
 var statusOK = status.New(codes.OK, "")
@@ -139,7 +144,8 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannel chan func()
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
 }
 
 type serverOptions struct {
@@ -578,11 +584,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 // options are used: StatsHandler, EnableTracing, or binary logging. In such
 // cases, the shared buffer pool will be ignored.
 //
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: use experimental.WithRecvBufferPool instead.  Will be deleted in
+// v1.60.0 or later.
 func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+	return recvBufferPool(bufferPool)
+}
+
+func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.recvBufferPool = bufferPool
 	})
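
For context on the option deprecated above, a minimal usage sketch (not part of this diff): `RecvBufferPool` still works in v1.60.x, paired with the library's `NewSharedBufferPool` constructor, but the replacement lives in the `experimental` package.

```go
package main

import "google.golang.org/grpc"

func main() {
	// grpc.NewSharedBufferPool returns the library's built-in
	// SharedBufferPool implementation (size-tiered sync.Pools).
	srv := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
	defer srv.Stop()
}
```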
@@ -616,15 +624,14 @@ func (s *Server) serverWorker() {
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
 	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
 }
 
-func (s *Server) stopServerWorkers() {
-	close(s.serverWorkerChannel)
-}
-
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
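
The refactor above replaces an unconditional `stopServerWorkers` with a close guarded by `grpcsync.OnceFunc`, which is internal to gRPC. A standalone sketch of the same pattern using the standard library's `sync.OnceFunc` (Go 1.21+); the channel and function names here are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan func())
	// sync.OnceFunc plays the role of the internal grpcsync.OnceFunc:
	// the wrapped close runs at most once, no matter how often it is called.
	closeWork := sync.OnceFunc(func() { close(work) })

	var wg sync.WaitGroup
	wg.Add(1)
	// A worker draining the channel until it is closed, as serverWorker does.
	go func() {
		defer wg.Done()
		for f := range work {
			f()
		}
	}()

	work <- func() { fmt.Println("task ran") }

	// Safe to call from multiple shutdown paths.
	closeWork()
	closeWork()
	wg.Wait()
}
```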
@@ -806,6 +813,18 @@ func (l *listenSocket) Close() error {
 // Serve returns when lis.Accept fails with fatal errors. lis will be closed when
 // this method returns.
 // Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, callers need to do the
+// following two things:
+//   - pass a net.Listener created by calling the Listen method on a
+//     net.ListenConfig with the `KeepAlive` field set to a negative value. This
+//     will result in the Go standard library not overriding OS defaults for TCP
+//     keepalive interval and time. But this will also result in the Go standard
+//     library not enabling TCP keepalives by default.
+//   - override the Accept method on the passed in net.Listener and set the
+//     SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
 func (s *Server) Serve(lis net.Listener) error {
 	s.mu.Lock()
 	s.printf("serving")
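
The two steps in the new Serve comment above can be sketched as follows. This assumes a plain TCP listener on Linux (the sockopt constants differ on other platforms); `keepaliveListener` and the port are illustrative, not part of the gRPC API:

```go
package main

import (
	"context"
	"log"
	"net"
	"syscall"

	"google.golang.org/grpc"
)

// keepaliveListener wraps Accept to set SO_KEEPALIVE on each accepted
// connection, leaving keepalive time/interval at OS defaults.
type keepaliveListener struct {
	net.Listener
}

func (l *keepaliveListener) Accept() (net.Conn, error) {
	conn, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	// Assumes a TCP listener, so the assertion below holds.
	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		conn.Close()
		return nil, err
	}
	if err := raw.Control(func(fd uintptr) {
		// Enable keepalives without touching time/interval (Linux sockopts).
		syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, 1)
	}); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}

func main() {
	// Step 1: a negative KeepAlive keeps Go from overriding the OS defaults.
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	// Step 2: wrap the listener so accepted conns re-enable SO_KEEPALIVE.
	srv := grpc.NewServer()
	log.Fatal(srv.Serve(&keepaliveListener{Listener: lis}))
}
```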
@@ -917,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
 		return
 	}
 	go func() {
-		s.serveStreams(st)
+		s.serveStreams(context.Background(), st, rawConn)
 		s.removeConn(lisAddr, st)
 	}()
 }
@@ -971,18 +990,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 	return st
 }
 
-func (s *Server) serveStreams(st transport.ServerTransport) {
-	defer st.Close(errors.New("finished serving streams for the server transport"))
-	var wg sync.WaitGroup
+func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
+	ctx = transport.SetConnection(ctx, rawConn)
+	ctx = peer.NewContext(ctx, st.Peer())
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+			RemoteAddr: st.Peer().Addr,
+			LocalAddr:  st.Peer().LocalAddr,
+		})
+		sh.HandleConn(ctx, &stats.ConnBegin{})
+	}
 
+	defer func() {
+		st.Close(errors.New("finished serving streams for the server transport"))
+		for _, sh := range s.opts.statsHandlers {
+			sh.HandleConn(ctx, &stats.ConnEnd{})
+		}
+	}()
+
 	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
-	st.HandleStreams(func(stream *transport.Stream) {
-		wg.Add(1)
-
+	st.HandleStreams(ctx, func(stream *transport.Stream) {
 		streamQuota.acquire()
 		f := func() {
 			defer streamQuota.release()
-			defer wg.Done()
 			s.handleStream(st, stream)
 		}
 
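
The rewritten serveStreams now tags the connection context (`TagConn`) and reports `ConnBegin`/`ConnEnd` on every registered stats handler. A minimal sketch of a `stats.Handler` that would observe those callbacks; `connLogger` is an illustrative name, not a gRPC type:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// connLogger is a minimal stats.Handler; only the connection hooks do work.
type connLogger struct{}

func (connLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (connLogger) HandleRPC(context.Context, stats.RPCStats)                       {}

func (connLogger) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("new connection from %v", info.RemoteAddr)
	return ctx // the returned ctx is the one later passed to HandleConn
}

func (connLogger) HandleConn(_ context.Context, s stats.ConnStats) {
	switch s.(type) {
	case *stats.ConnBegin:
		log.Println("ConnBegin")
	case *stats.ConnEnd:
		log.Println("ConnEnd")
	}
}

func main() {
	srv := grpc.NewServer(grpc.StatsHandler(connLogger{}))
	defer srv.Stop()
}
```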
@@ -996,7 +1026,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 		}
 		go f()
 	})
-	wg.Wait()
 }
 
 var _ http.Handler = (*Server)(nil)
@@ -1040,7 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	defer s.removeConn(listenerAddressForServeHTTP, st)
-	s.serveStreams(st)
+	s.serveStreams(r.Context(), st, nil)
 }
 
 func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
@@ -1689,6 +1718,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 
 func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 	ctx := stream.Context()
+	ctx = contextWithServer(ctx, s)
 	var ti *traceInfo
 	if EnableTracing {
 		tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
@@ -1697,7 +1727,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 			tr: tr,
 			firstLine: firstLine{
 				client:     false,
-				remoteAddr: t.RemoteAddr(),
+				remoteAddr: t.Peer().Addr,
 			},
 		}
 		if dl, ok := ctx.Deadline(); ok {
@@ -1731,6 +1761,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 	service := sm[:pos]
 	method := sm[pos+1:]
 
+	md, _ := metadata.FromIncomingContext(ctx)
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+		sh.HandleRPC(ctx, &stats.InHeader{
+			FullMethod:  stream.Method(),
+			RemoteAddr:  t.Peer().Addr,
+			LocalAddr:   t.Peer().LocalAddr,
+			Compression: stream.RecvCompress(),
+			WireLength:  stream.HeaderWireLength(),
+			Header:      md,
+		})
+	}
+	// To have calls in stream callouts work. Will delete once all stats handler
+	// calls come from the gRPC layer.
+	stream.SetContext(ctx)
+
 	srv, knownService := s.services[service]
 	if knownService {
 		if md, ok := srv.methods[method]; ok {
@@ -1820,62 +1866,68 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
-	s.quit.Fire()
-
-	defer func() {
-		s.serveWG.Wait()
-		s.done.Fire()
-	}()
-
-	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
-
-	s.mu.Lock()
-	listeners := s.lis
-	s.lis = nil
-	conns := s.conns
-	s.conns = nil
-	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
-	s.cv.Broadcast()
-	s.mu.Unlock()
-
-	for lis := range listeners {
-		lis.Close()
-	}
-	for _, cs := range conns {
-		for st := range cs {
-			st.Close(errors.New("Server.Stop called"))
-		}
-	}
-	if s.opts.numServerWorkers > 0 {
-		s.stopServerWorkers()
-	}
-
-	s.mu.Lock()
-	if s.events != nil {
-		s.events.Finish()
-		s.events = nil
-	}
-	s.mu.Unlock()
+	s.stop(false)
 }
 
 // GracefulStop stops the gRPC server gracefully. It stops the server from
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
+	s.stop(true)
+}
+
+func (s *Server) stop(graceful bool) {
 	s.quit.Fire()
 	defer s.done.Fire()
 
 	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+
 	s.mu.Lock()
-	if s.conns == nil {
-		s.mu.Unlock()
-		return
+	s.closeListenersLocked()
+	// Wait for serving threads to be ready to exit. Only then can we be sure no
+	// new conns will be created.
+	s.mu.Unlock()
+	s.serveWG.Wait()
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if graceful {
+		s.drainAllServerTransportsLocked()
+	} else {
+		s.closeServerTransportsLocked()
 	}
 
-	for lis := range s.lis {
-		lis.Close()
+	for len(s.conns) != 0 {
+		s.cv.Wait()
 	}
-	s.lis = nil
+	s.conns = nil
+
+	if s.opts.numServerWorkers > 0 {
+		// Closing the channel (only once, via grpcsync.OnceFunc) after all the
+		// connections have been closed above ensures that there are no
+		// goroutines executing the callback passed to st.HandleStreams (where
+		// the channel is written to).
+		s.serverWorkerChannelClose()
+	}
+
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeServerTransportsLocked() {
+	for _, conns := range s.conns {
+		for st := range conns {
+			st.Close(errors.New("Server.Stop called"))
+		}
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) drainAllServerTransportsLocked() {
 	if !s.drain {
 		for _, conns := range s.conns {
 			for st := range conns {
@@ -1884,22 +1936,14 @@ func (s *Server) GracefulStop() {
 				st.Drain("graceful_stop")
 			}
 		}
 		s.drain = true
 	}
+}
 
-	// Wait for serving threads to be ready to exit. Only then can we be sure no
-	// new conns will be created.
-	s.mu.Unlock()
-	s.serveWG.Wait()
-	s.mu.Lock()
-
-	for len(s.conns) != 0 {
-		s.cv.Wait()
+// s.mu must be held by the caller.
+func (s *Server) closeListenersLocked() {
+	for lis := range s.lis {
+		lis.Close()
 	}
-	s.conns = nil
-	if s.events != nil {
-		s.events.Finish()
-		s.events = nil
-	}
-	s.mu.Unlock()
+	s.lis = nil
 }
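
Stop and GracefulStop are now thin wrappers over the shared stop(graceful bool) path above. A hedged sketch of how callers typically combine the two, falling back to a hard stop if draining stalls; the address and timeout are illustrative:

```go
package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt)
		<-sig

		// Drain in-flight RPCs; fall back to a hard Stop if it takes too long.
		done := make(chan struct{})
		go func() {
			srv.GracefulStop()
			close(done)
		}()
		select {
		case <-done:
		case <-time.After(10 * time.Second):
			srv.Stop()
		}
	}()

	// Serve returns a non-nil error unless Stop or GracefulStop was called.
	log.Println(srv.Serve(lis))
}
```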
|
||||
// contentSubtype must be lowercase
|
||||
@ -1913,11 +1957,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
|
||||
}
|
||||
codec := encoding.GetCodec(contentSubtype)
|
||||
if codec == nil {
|
||||
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
|
||||
return encoding.GetCodec(proto.Name)
|
||||
}
|
||||
return codec
|
||||
}
|
||||
|
||||
type serverKey struct{}
|
||||
|
||||
// serverFromContext gets the Server from the context.
|
||||
func serverFromContext(ctx context.Context) *Server {
|
||||
s, _ := ctx.Value(serverKey{}).(*Server)
|
||||
return s
|
||||
}
|
||||
|
||||
// contextWithServer sets the Server in the context.
|
||||
func contextWithServer(ctx context.Context, server *Server) context.Context {
|
||||
return context.WithValue(ctx, serverKey{}, server)
|
||||
}
|
||||
|
||||
// isRegisteredMethod returns whether the passed in method is registered as a
|
||||
// method on the server. /service/method and service/method will match if the
|
||||
// service and method are registered on the server.
|
||||
func (s *Server) isRegisteredMethod(serviceMethod string) bool {
|
||||
if serviceMethod != "" && serviceMethod[0] == '/' {
|
||||
serviceMethod = serviceMethod[1:]
|
||||
}
|
||||
pos := strings.LastIndex(serviceMethod, "/")
|
||||
if pos == -1 { // Invalid method name syntax.
|
||||
return false
|
||||
}
|
||||
service := serviceMethod[:pos]
|
||||
method := serviceMethod[pos+1:]
|
||||
srv, knownService := s.services[service]
|
||||
if knownService {
|
||||
if _, ok := srv.methods[method]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := srv.streams[method]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata to be sent from the server to the client.
|
||||
// The context provided must be the context passed to the server's handler.
|
||||
//
|
||||
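
The new isRegisteredMethod accepts both /service/method and service/method forms. A standalone sketch of just that parsing step; parseServiceMethod is a hypothetical helper, not part of grpc-go:

```go
package main

import (
	"fmt"
	"strings"
)

// parseServiceMethod mirrors the parsing inside isRegisteredMethod:
// strip one leading '/', then split on the last '/'.
func parseServiceMethod(serviceMethod string) (service, method string, ok bool) {
	serviceMethod = strings.TrimPrefix(serviceMethod, "/")
	pos := strings.LastIndex(serviceMethod, "/")
	if pos == -1 { // Invalid method name syntax.
		return "", "", false
	}
	return serviceMethod[:pos], serviceMethod[pos+1:], true
}

func main() {
	for _, m := range []string{
		"/grpc.health.v1.Health/Check", // leading-slash form
		"grpc.health.v1.Health/Check",  // bare form, also accepted
		"no-slash",                     // rejected
	} {
		service, method, ok := parseServiceMethod(m)
		fmt.Printf("%-30s -> service=%q method=%q ok=%v\n", m, service, method, ok)
	}
}
```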