Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

commit 3af1e26d7c (parent 327fcd1b1b), committed by mergify[bot]

    Update to kube v1.17

    Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
vendor/google.golang.org/grpc/internal/transport/controlbuf.go (generated, vendored): 12 changes
@@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false }
 type headerFrame struct {
     streamID   uint32
     hf         []hpack.HeaderField
-    endStream  bool                       // Valid on server side.
-    initStream func(uint32) (bool, error) // Used only on the client side.
+    endStream  bool               // Valid on server side.
+    initStream func(uint32) error // Used only on the client side.
     onWrite    func()
     wq         *writeQuota    // write quota for the stream created.
     cleanup    *cleanupStream // Valid on the server side.
@@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {

 func (l *loopyWriter) originateStream(str *outStream) error {
     hdr := str.itl.dequeue().(*headerFrame)
-    sendPing, err := hdr.initStream(str.id)
-    if err != nil {
+    if err := hdr.initStream(str.id); err != nil {
         if err == ErrConnClosing {
             return err
         }
         // Other errors(errStreamDrain) need not close transport.
         return nil
     }
-    if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+    if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
         return err
     }
     l.estdStreams[str.id] = str
-    if sendPing {
-        return l.pingHandler(&ping{data: [8]byte{}})
-    }
     return nil
 }
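The hunk above distinguishes connection-fatal errors from per-stream ones: ErrConnClosing propagates to the caller, anything else (such as a drain error) is swallowed so the transport keeps running. A minimal standalone sketch of the same pattern, with hypothetical sentinel errors standing in for the transport's own:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the transport's sentinel errors.
var errConnClosing = errors.New("transport is closing")
var errStreamDrain = errors.New("transport is draining; no new streams")

// originate mimics originateStream's contract: only a connection-fatal
// error is propagated (which tears down the writer loop); stream-scoped
// errors are swallowed so the transport keeps serving other streams.
func originate(initStream func() error) error {
	if err := initStream(); err != nil {
		if errors.Is(err, errConnClosing) {
			return err // fatal: close the transport
		}
		return nil // e.g. drain: drop the stream, keep the transport
	}
	return nil
}

func main() {
	fmt.Println(originate(func() error { return errStreamDrain })) // <nil>
	fmt.Println(originate(func() error { return errConnClosing })) // transport is closing
}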
vendor/google.golang.org/grpc/internal/transport/handler_server.go (generated, vendored): 8 changes
@@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro

     if err == nil { // transport has not been closed
         if ht.stats != nil {
-            ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+            ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+                Trailer: s.trailer.Copy(),
+            })
         }
     }
     ht.Close()
@@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {

     if err == nil {
         if ht.stats != nil {
-            ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+            ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+                Header: md.Copy(),
+            })
         }
     }
     return err
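Both hunks attach a copy of the metadata to the stats event rather than the live map. A small sketch of why the copy matters, using grpc's metadata package (only the package API below is real; the scenario is illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("k", "v")

	// Handing out md directly would let later mutations by the transport
	// race with (and leak into) whoever consumes the stats event.
	snapshot := md.Copy()

	md.Append("k", "v2") // the transport keeps mutating its own map

	fmt.Println(md["k"])       // [v v2]
	fmt.Println(snapshot["k"]) // [v]: the stats event is unaffected
}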
vendor/google.golang.org/grpc/internal/transport/http2_client.go (generated, vendored): 202 changes
@@ -35,6 +35,7 @@ import (

     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/credentials"
+    "google.golang.org/grpc/internal"
     "google.golang.org/grpc/internal/channelz"
     "google.golang.org/grpc/internal/syscall"
     "google.golang.org/grpc/keepalive"
@@ -44,8 +45,14 @@ import (
     "google.golang.org/grpc/status"
 )

+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
 // http2Client implements the ClientTransport interface with HTTP2.
 type http2Client struct {
+    lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
     ctx      context.Context
     cancel   context.CancelFunc
     ctxDone  <-chan struct{} // Cache the ctx.Done() chan.
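The new counter is a plain uint64 that must only be touched through sync/atomic; later in this diff it is bumped with atomic.AddUint64 to mint a connection ID. A minimal sketch of that pattern:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var connCounter uint64 // only touched through the atomic package

// newConnID mirrors the diff's atomic.AddUint64(&clientConnectionCounter, 1):
// AddUint64 returns the incremented value, so the first connection gets 1.
func newConnID() uint64 {
	return atomic.AddUint64(&connCounter, 1)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); newConnID() }()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&connCounter)) // always 100, no lost updates
}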
@@ -62,8 +69,6 @@ type http2Client struct {
     // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
     // that the server sent GoAway on this transport.
     goAway chan struct{}
-    // awakenKeepalive is used to wake up keepalive when after it has gone dormant.
-    awakenKeepalive chan struct{}

     framer *framer
     // controlBuf delivers all the control related tasks (e.g., window
@@ -77,9 +82,6 @@ type http2Client struct {

     perRPCCreds []credentials.PerRPCCredentials

-    // Boolean to keep track of reading activity on transport.
-    // 1 is true and 0 is false.
-    activity uint32 // Accessed atomically.
     kp               keepalive.ClientParameters
     keepaliveEnabled bool

@@ -110,6 +112,16 @@ type http2Client struct {
     // goAwayReason records the http2.ErrCode and debug data received with the
     // GoAway frame.
     goAwayReason GoAwayReason
+    // A condition variable used to signal when the keepalive goroutine should
+    // go dormant. The condition for dormancy is based on the number of active
+    // streams and the `PermitWithoutStream` keepalive client parameter. And
+    // since the number of active streams is guarded by the above mutex, we use
+    // the same for this condition variable as well.
+    kpDormancyCond *sync.Cond
+    // A boolean to track whether the keepalive goroutine is dormant or not.
+    // This is checked before attempting to signal the above condition
+    // variable.
+    kpDormant bool

     // Fields below are for channelz metric collection.
     channelzID int64 // channelz unique identification number
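kpDormancyCond replaces the old awakenKeepalive channel: the keepalive goroutine now parks on a condition variable guarded by the transport mutex, and stream creation signals it. A condensed sketch of that handshake with simplified fields (not the transport's real types):

package main

import (
	"fmt"
	"sync"
	"time"
)

type transport struct {
	mu            sync.Mutex
	activeStreams int
	dormant       bool
	dormancyCond  *sync.Cond
}

// keepaliveTick parks the goroutine while there is nothing to ping.
func (t *transport) keepaliveTick() {
	t.mu.Lock()
	for t.activeStreams == 0 {
		t.dormant = true
		t.dormancyCond.Wait() // atomically unlocks mu and blocks
	}
	t.dormant = false
	t.mu.Unlock()
	fmt.Println("awake: would send a keepalive ping now")
}

// newStream wakes the keepalive goroutine, mirroring NewStream in the diff:
// Signal() is only attempted when the goroutine is actually dormant.
func (t *transport) newStream() {
	t.mu.Lock()
	t.activeStreams++
	if t.dormant {
		t.dormancyCond.Signal()
	}
	t.mu.Unlock()
}

func main() {
	t := &transport{}
	t.dormancyCond = sync.NewCond(&t.mu)
	go t.keepaliveTick()
	time.Sleep(50 * time.Millisecond) // let the goroutine go dormant
	t.newStream()
	time.Sleep(50 * time.Millisecond)
}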
@@ -119,6 +131,8 @@ type http2Client struct {
     onClose func()

     bufferPool *bufferPool
+
+    connectionID uint64
 }

 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
         readerDone:      make(chan struct{}),
         writerDone:      make(chan struct{}),
         goAway:          make(chan struct{}),
-        awakenKeepalive: make(chan struct{}, 1),
         framer:          newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
         fc:              &trInFlow{limit: uint32(icwz)},
         scheme:          scheme,
@@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
             updateFlowControl: t.updateFlowControl,
         }
     }
-    // Make sure awakenKeepalive can't be written upon.
-    // keepalive routine will make it writable, if need be.
-    t.awakenKeepalive <- struct{}{}
     if t.statsHandler != nil {
         t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
             RemoteAddr: t.remoteAddr,
@@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
         t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
     }
     if t.keepaliveEnabled {
+        t.kpDormancyCond = sync.NewCond(&t.mu)
         go t.keepalive()
     }
     // Start the reader goroutine for incoming message. Each transport has
@@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
         }
     }

+    t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
     if err := t.framer.writer.Flush(); err != nil {
         return nil, err
     }
@@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
     // TODO(zhaoq): Handle uint32 overflow of Stream.id.
     s := &Stream{
+        ct:           t,
         done:         make(chan struct{}),
         method:       callHdr.Method,
         sendCompress: callHdr.SendCompress,
@@ -380,23 +394,23 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 }

 func (t *http2Client) getPeer() *peer.Peer {
-    pr := &peer.Peer{
-        Addr: t.remoteAddr,
+    return &peer.Peer{
+        Addr:     t.remoteAddr,
+        AuthInfo: t.authInfo,
     }
-    // Attach Auth info if there is any.
-    if t.authInfo != nil {
-        pr.AuthInfo = t.authInfo
-    }
-    return pr
 }

 func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
     aud := t.createAudience(callHdr)
-    authData, err := t.getTrAuthData(ctx, aud)
+    ri := credentials.RequestInfo{
+        Method: callHdr.Method,
+    }
+    ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+    authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
     if err != nil {
         return nil, err
     }
-    callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
+    callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
     if err != nil {
         return nil, err
     }
@@ -419,6 +433,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)

     if callHdr.SendCompress != "" {
         headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+        headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
     }
     if dl, ok := ctx.Deadline(); ok {
         // Send out timeout regardless its value. The server can detect timeout context by itself.
@@ -564,7 +579,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
     hdr := &headerFrame{
         hf:        headerFields,
         endStream: false,
-        initStream: func(id uint32) (bool, error) {
+        initStream: func(id uint32) error {
             t.mu.Lock()
             if state := t.state; state != reachable {
                 t.mu.Unlock()
@@ -574,29 +589,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
                     err = ErrConnClosing
                 }
                 cleanup(err)
-                return false, err
+                return err
             }
             t.activeStreams[id] = s
             if channelz.IsOn() {
                 atomic.AddInt64(&t.czData.streamsStarted, 1)
                 atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
             }
-            var sendPing bool
-            // If the number of active streams change from 0 to 1, then check if keepalive
-            // has gone dormant. If so, wake it up.
-            if len(t.activeStreams) == 1 && t.keepaliveEnabled {
-                select {
-                case t.awakenKeepalive <- struct{}{}:
-                    sendPing = true
-                    // Fill the awakenKeepalive channel again as this channel must be
-                    // kept non-writable except at the point that the keepalive()
-                    // goroutine is waiting either to be awaken or shutdown.
-                    t.awakenKeepalive <- struct{}{}
-                default:
-                }
-            }
+            // If the keepalive goroutine has gone dormant, wake it up.
+            if t.kpDormant {
+                t.kpDormancyCond.Signal()
+            }
             t.mu.Unlock()
-            return sendPing, nil
+            return nil
         },
         onOrphaned: cleanup,
         wq:         s.wq,
@@ -674,12 +679,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
         }
     }
     if t.statsHandler != nil {
+        header, _, _ := metadata.FromOutgoingContextRaw(ctx)
         outHeader := &stats.OutHeader{
             Client:      true,
             FullMethod:  callHdr.Method,
             RemoteAddr:  t.remoteAddr,
             LocalAddr:   t.localAddr,
             Compression: callHdr.SendCompress,
+            Header:      header.Copy(),
         }
         t.statsHandler.HandleRPC(s.ctx, outHeader)
     }
@@ -778,6 +785,11 @@ func (t *http2Client) Close() error {
     t.state = closing
     streams := t.activeStreams
     t.activeStreams = nil
+    if t.kpDormant {
+        // If the keepalive goroutine is blocked on this condition variable, we
+        // should unblock it so that the goroutine eventually exits.
+        t.kpDormancyCond.Signal()
+    }
     t.mu.Unlock()
     t.controlBuf.finish()
     t.cancel()
@@ -853,11 +865,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     return t.controlBuf.put(df)
 }

-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Client) getStream(f http2.Frame) *Stream {
     t.mu.Lock()
-    defer t.mu.Unlock()
-    s, ok := t.activeStreams[f.Header().StreamID]
-    return s, ok
+    s := t.activeStreams[f.Header().StreamID]
+    t.mu.Unlock()
+    return s
 }

 // adjustWindow sends out extra window update over the initial window size
@@ -937,8 +949,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
         t.controlBuf.put(bdpPing)
     }
     // Select the right stream to dispatch.
-    s, ok := t.getStream(f)
-    if !ok {
+    s := t.getStream(f)
+    if s == nil {
         return
     }
     if size > 0 {
@@ -969,8 +981,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
 }

 func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
-    s, ok := t.getStream(f)
-    if !ok {
+    s := t.getStream(f)
+    if s == nil {
         return
     }
     if f.ErrCode == http2.ErrCodeRefusedStream {
@@ -1147,8 +1159,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {

 // operateHeaders takes action on the decoded headers.
 func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
-    s, ok := t.getStream(frame)
-    if !ok {
+    s := t.getStream(frame)
+    if s == nil {
         return
     }
     endStream := frame.StreamEnded()
@@ -1177,12 +1189,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
         inHeader := &stats.InHeader{
             Client:     true,
             WireLength: int(frame.Header().Length),
+            Header:     s.header.Copy(),
         }
         t.statsHandler.HandleRPC(s.ctx, inHeader)
     } else {
         inTrailer := &stats.InTrailer{
             Client:     true,
             WireLength: int(frame.Header().Length),
+            Trailer:    s.trailer.Copy(),
         }
         t.statsHandler.HandleRPC(s.ctx, inTrailer)
     }
@@ -1191,6 +1205,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {

     // If headerChan hasn't been closed yet
     if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+        s.headerValid = true
         if !endStream {
             // HEADERS frame block carries a Response-Headers.
             isHeader = true
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
@@ -1248,7 +1263,7 @@ func (t *http2Client) reader() {
         t.controlBuf.throttle()
         frame, err := t.framer.fr.ReadFrame()
         if t.keepaliveEnabled {
-            atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+            atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
         }
         if err != nil {
             // Abort an active stream if the http2.Framer returns a
@@ -1292,56 +1307,83 @@ func (t *http2Client) reader() {
     }
 }

+func minTime(a, b time.Duration) time.Duration {
+    if a < b {
+        return a
+    }
+    return b
+}
+
 // keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
     p := &ping{data: [8]byte{}}
+    // True iff a ping has been sent, and no data has been received since then.
+    outstandingPing := false
+    // Amount of time remaining before which we should receive an ACK for the
+    // last sent ping.
+    timeoutLeft := time.Duration(0)
+    // Records the last value of t.lastRead before we go block on the timer.
+    // This is required to check for read activity since then.
+    prevNano := time.Now().UnixNano()
     timer := time.NewTimer(t.kp.Time)
     for {
         select {
         case <-timer.C:
-            if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-                timer.Reset(t.kp.Time)
+            lastRead := atomic.LoadInt64(&t.lastRead)
+            if lastRead > prevNano {
+                // There has been read activity since the last time we were here.
+                outstandingPing = false
+                // Next timer should fire at kp.Time seconds from lastRead time.
+                timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+                prevNano = lastRead
                 continue
             }
-            // Check if keepalive should go dormant.
+            if outstandingPing && timeoutLeft <= 0 {
+                t.Close()
+                return
+            }
             t.mu.Lock()
+            if t.state == closing {
+                // If the transport is closing, we should exit from the
+                // keepalive goroutine here. If not, we could have a race
+                // between the call to Signal() from Close() and the call to
+                // Wait() here, whereby the keepalive goroutine ends up
+                // blocking on the condition variable which will never be
+                // signalled again.
+                t.mu.Unlock()
+                return
+            }
             if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
-                // Make awakenKeepalive writable.
-                <-t.awakenKeepalive
-                t.mu.Unlock()
-                select {
-                case <-t.awakenKeepalive:
-                    // If the control gets here a ping has been sent
-                    // need to reset the timer with keepalive.Timeout.
-                case <-t.ctx.Done():
-                    return
-                }
-            } else {
-                t.mu.Unlock()
+                // If a ping was sent out previously (because there were active
+                // streams at that point) which wasn't acked and its timeout
+                // hadn't fired, but we got here and are about to go dormant,
+                // we should make sure that we unconditionally send a ping once
+                // we awaken.
+                outstandingPing = false
+                t.kpDormant = true
+                t.kpDormancyCond.Wait()
+            }
+            t.kpDormant = false
+            t.mu.Unlock()
+
+            // We get here either because we were dormant and a new stream was
+            // created which unblocked the Wait() call, or because the
+            // keepalive timer expired. In both cases, we need to send a ping.
+            if !outstandingPing {
                 if channelz.IsOn() {
                     atomic.AddInt64(&t.czData.kpCount, 1)
                 }
-                // Send ping.
                 t.controlBuf.put(p)
+                timeoutLeft = t.kp.Timeout
+                outstandingPing = true
             }
-
-            // By the time control gets here a ping has been sent one way or the other.
-            timer.Reset(t.kp.Timeout)
-            select {
-            case <-timer.C:
-                if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-                    timer.Reset(t.kp.Time)
-                    continue
-                }
-                infof("transport: closing client transport due to idleness.")
-                t.Close()
-                return
-            case <-t.ctx.Done():
-                if !timer.Stop() {
-                    <-timer.C
-                }
-                return
-            }
+            // The amount of time to sleep here is the minimum of kp.Time and
+            // timeoutLeft. This will ensure that we wait only for kp.Time
+            // before sending out the next ping (for cases where the ping is
+            // acked).
+            sleepDuration := minTime(t.kp.Time, timeoutLeft)
+            timeoutLeft -= sleepDuration
+            timer.Reset(sleepDuration)
         case <-t.ctx.Done():
             if !timer.Stop() {
                 <-timer.C
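The reworked keepalive replaces the old activity flag with a lastRead timestamp: on each tick it checks whether any read happened since the previous tick and, if so, re-arms the timer to fire kp.Time after that read instead of sending a ping. A standalone sketch of just that timer arithmetic (names simplified, not the transport's real fields):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const kpTime = 2 * time.Second // stand-in for keepalive.ClientParameters.Time

var lastRead int64 // updated by the reader goroutine via StoreInt64

// nextDelay reproduces the Reset computation from the diff: fire kp.Time
// after the most recent read, measured from now. The bool reports whether
// there was read activity since prevNano.
func nextDelay(prevNano int64) (time.Duration, bool) {
	lr := atomic.LoadInt64(&lastRead)
	if lr > prevNano {
		// Read activity since the last tick: push the deadline out.
		return time.Duration(lr) + kpTime - time.Duration(time.Now().UnixNano()), true
	}
	return 0, false // no activity: time to consider sending a ping
}

func main() {
	prev := time.Now().UnixNano()
	// Simulate the reader observing data 500ms after "prev".
	atomic.StoreInt64(&lastRead, prev+int64(500*time.Millisecond))
	d, active := nextDelay(prev)
	fmt.Println(active, d.Round(time.Millisecond)) // true, roughly 2.5s
}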
vendor/google.golang.org/grpc/internal/transport/http2_server.go (generated, vendored): 152 changes
@@ -62,11 +62,15 @@ var (
     statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
 )

+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
+    lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
     ctx        context.Context
-    ctxDone    <-chan struct{} // Cache the context.Done() chan
-    cancel     context.CancelFunc
+    done       chan struct{}
     conn       net.Conn
     loopy      *loopyWriter
     readerDone chan struct{} // sync point to enable testing.
@@ -84,12 +88,8 @@ type http2Server struct {
     controlBuf *controlBuffer
     fc         *trInFlow
     stats      stats.Handler
-    // Flag to keep track of reading activity on transport.
-    // 1 is true and 0 is false.
-    activity uint32 // Accessed atomically.
     // Keepalive and max-age parameters for the server.
     kp keepalive.ServerParameters
-
     // Keepalive enforcement policy.
     kep keepalive.EnforcementPolicy
     // The time instance last ping was received.
@@ -125,6 +125,8 @@ type http2Server struct {
     channelzID int64 // channelz unique identification number
     czData     *channelzData
     bufferPool *bufferPool
+
+    connectionID uint64
 }

 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@@ -175,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
             Val: *config.MaxHeaderListSize,
         })
     }
+    if config.HeaderTableSize != nil {
+        isettings = append(isettings, http2.Setting{
+            ID:  http2.SettingHeaderTableSize,
+            Val: *config.HeaderTableSize,
+        })
+    }
     if err := framer.fr.WriteSettings(isettings...); err != nil {
         return nil, connectionErrorf(false, err, "transport: %v", err)
     }
@@ -206,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
     if kep.MinTime == 0 {
         kep.MinTime = defaultKeepalivePolicyMinTime
     }
-    ctx, cancel := context.WithCancel(context.Background())
+    done := make(chan struct{})
     t := &http2Server{
-        ctx:        ctx,
-        cancel:     cancel,
-        ctxDone:    ctx.Done(),
+        ctx:        context.Background(),
+        done:       done,
         conn:       conn,
         remoteAddr: conn.RemoteAddr(),
         localAddr:  conn.LocalAddr(),
@@ -231,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
         czData:     new(channelzData),
         bufferPool: newBufferPool(),
     }
-    t.controlBuf = newControlBuffer(t.ctxDone)
+    t.controlBuf = newControlBuffer(t.done)
     if dynamicWindow {
         t.bdpEst = &bdpEstimator{
             bdp: initialWindowSize,
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
@@ -273,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
     if err != nil {
         return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
     }
-    atomic.StoreUint32(&t.activity, 1)
+    atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
     sf, ok := frame.(*http2.SettingsFrame)
     if !ok {
         return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -362,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
             rstCode: http2.ErrCodeRefusedStream,
             onWrite: func() {},
         })
+        s.cancel()
         return false
     }
 }
 t.mu.Lock()
 if t.state != reachable {
     t.mu.Unlock()
+    s.cancel()
     return false
 }
 if uint32(len(t.activeStreams)) >= t.maxStreams {
@@ -378,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
         rstCode: http2.ErrCodeRefusedStream,
         onWrite: func() {},
     })
+    s.cancel()
     return false
 }
 if streamID%2 != 1 || streamID <= t.maxStreamID {
     t.mu.Unlock()
     // illegal gRPC stream id.
     errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+    s.cancel()
     return true
 }
 t.maxStreamID = streamID
@@ -408,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
         LocalAddr:   t.localAddr,
         Compression: s.recvCompress,
         WireLength:  int(frame.Header().Length),
+        Header:      metadata.MD(state.data.mdata).Copy(),
     }
     t.stats.HandleRPC(s.ctx, inHeader)
 }
@@ -441,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
     for {
         t.controlBuf.throttle()
         frame, err := t.framer.fr.ReadFrame()
-        atomic.StoreUint32(&t.activity, 1)
+        atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
         if err != nil {
             if se, ok := err.(http2.StreamError); ok {
                 warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
@@ -749,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
     return true
 }

-// WriteHeader sends the header metedata md back to the client.
+// WriteHeader sends the header metadata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
     if s.updateHeaderSent() || s.getState() == streamDone {
         return ErrIllegalHeaderWrite
@@ -800,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
     if t.stats != nil {
         // Note: WireLength is not set in outHeader.
         // TODO(mmukhi): Revisit this later, if needed.
-        outHeader := &stats.OutHeader{}
+        outHeader := &stats.OutHeader{
+            Header: s.header.Copy(),
+        }
         t.stats.HandleRPC(s.Context(), outHeader)
     }
     return nil
@@ -863,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
     rst := s.getState() == streamActive
     t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
     if t.stats != nil {
-        t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+        t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+            Trailer: s.trailer.Copy(),
+        })
     }
     return nil
 }
@@ -885,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
         // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
         s.cancel()
         select {
-        case <-t.ctx.Done():
+        case <-t.done:
             return ErrConnClosing
         default:
         }
@@ -907,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     }
     if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
         select {
-        case <-t.ctx.Done():
+        case <-t.done:
             return ErrConnClosing
         default:
         }
@@ -924,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 // after an additional duration of keepalive.Timeout.
 func (t *http2Server) keepalive() {
     p := &ping{}
-    var pingSent bool
-    maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
-    maxAge := time.NewTimer(t.kp.MaxConnectionAge)
-    keepalive := time.NewTimer(t.kp.Time)
-    // NOTE: All exit paths of this function should reset their
-    // respective timers. A failure to do so will cause the
-    // following clean-up to deadlock and eventually leak.
+    // True iff a ping has been sent, and no data has been received since then.
+    outstandingPing := false
+    // Amount of time remaining before which we should receive an ACK for the
+    // last sent ping.
+    kpTimeoutLeft := time.Duration(0)
+    // Records the last value of t.lastRead before we go block on the timer.
+    // This is required to check for read activity since then.
+    prevNano := time.Now().UnixNano()
+    // Initialize the different timers to their default values.
+    idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+    ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+    kpTimer := time.NewTimer(t.kp.Time)
     defer func() {
-        if !maxIdle.Stop() {
-            <-maxIdle.C
-        }
-        if !maxAge.Stop() {
-            <-maxAge.C
-        }
-        if !keepalive.Stop() {
-            <-keepalive.C
-        }
+        // We need to drain the underlying channel in these timers after a call
+        // to Stop(), only if we are interested in resetting them. Clearly we
+        // are not interested in resetting them here.
+        idleTimer.Stop()
+        ageTimer.Stop()
+        kpTimer.Stop()
     }()
+
     for {
         select {
-        case <-maxIdle.C:
+        case <-idleTimer.C:
             t.mu.Lock()
             idle := t.idle
             if idle.IsZero() { // The connection is non-idle.
                 t.mu.Unlock()
-                maxIdle.Reset(t.kp.MaxConnectionIdle)
+                idleTimer.Reset(t.kp.MaxConnectionIdle)
                 continue
             }
             val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -958,44 +980,52 @@ func (t *http2Server) keepalive() {
             // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
             // Gracefully close the connection.
             t.drain(http2.ErrCodeNo, []byte{})
-            // Resetting the timer so that the clean-up doesn't deadlock.
-            maxIdle.Reset(infinity)
             return
         }
-        maxIdle.Reset(val)
-    case <-maxAge.C:
+        idleTimer.Reset(val)
+    case <-ageTimer.C:
         t.drain(http2.ErrCodeNo, []byte{})
-        maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+        ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
         select {
-        case <-maxAge.C:
+        case <-ageTimer.C:
             // Close the connection after grace period.
             infof("transport: closing server transport due to maximum connection age.")
             t.Close()
-            // Resetting the timer so that the clean-up doesn't deadlock.
-            maxAge.Reset(infinity)
-        case <-t.ctx.Done():
+        case <-t.done:
         }
         return
-    case <-keepalive.C:
-        if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-            pingSent = false
-            keepalive.Reset(t.kp.Time)
+    case <-kpTimer.C:
+        lastRead := atomic.LoadInt64(&t.lastRead)
+        if lastRead > prevNano {
+            // There has been read activity since the last time we were
+            // here. Setup the timer to fire at kp.Time seconds from
+            // lastRead time and continue.
+            outstandingPing = false
+            kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+            prevNano = lastRead
             continue
         }
-        if pingSent {
+        if outstandingPing && kpTimeoutLeft <= 0 {
             infof("transport: closing server transport due to idleness.")
             t.Close()
-            // Resetting the timer so that the clean-up doesn't deadlock.
-            keepalive.Reset(infinity)
             return
         }
-        pingSent = true
-        if channelz.IsOn() {
-            atomic.AddInt64(&t.czData.kpCount, 1)
+        if !outstandingPing {
+            if channelz.IsOn() {
+                atomic.AddInt64(&t.czData.kpCount, 1)
+            }
+            t.controlBuf.put(p)
+            kpTimeoutLeft = t.kp.Timeout
+            outstandingPing = true
         }
-        t.controlBuf.put(p)
-        keepalive.Reset(t.kp.Timeout)
-    case <-t.ctx.Done():
+        // The amount of time to sleep here is the minimum of kp.Time and
+        // timeoutLeft. This will ensure that we wait only for kp.Time
+        // before sending out the next ping (for cases where the ping is
+        // acked).
+        sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+        kpTimeoutLeft -= sleepDuration
+        kpTimer.Reset(sleepDuration)
+    case <-t.done:
        return
     }
 }
@@ -1015,7 +1045,7 @@ func (t *http2Server) Close() error {
     t.activeStreams = nil
     t.mu.Unlock()
     t.controlBuf.finish()
-    t.cancel()
+    close(t.done)
     err := t.conn.Close()
     if channelz.IsOn() {
         channelz.RemoveEntry(t.channelzID)
@@ -1155,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
     select {
     case <-t.drainChan:
     case <-timer.C:
-    case <-t.ctx.Done():
+    case <-t.done:
         return
     }
     t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
@@ -1205,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
     select {
     case sz := <-resp:
         return int64(sz)
-    case <-t.ctxDone:
+    case <-t.done:
         return -1
     case <-timer.C:
         return -2
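The old cleanup drained each timer channel after Stop() so that a later Reset() could not pick up a stale tick; the new deferred cleanup only calls Stop() because those timers are never reset again. A sketch contrasting the two patterns, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"time"
)

func main() {
	tm := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let it fire; the tick now sits in tm.C

	// Stop() returns false because the timer already fired. If we intend to
	// Reset() it, we must drain the stale tick first, or the next <-tm.C
	// would return immediately with the old expiry.
	if !tm.Stop() {
		<-tm.C
	}
	tm.Reset(10 * time.Millisecond)
	fmt.Println("reset armed cleanly, fired at", (<-tm.C).Round(time.Millisecond))

	// When the timer is simply being abandoned (as in the new deferred
	// cleanup), a bare Stop() suffices; an undrained tick is just garbage
	// collected along with the timer.
	tm.Stop()
}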
vendor/google.golang.org/grpc/internal/transport/transport.go (generated, vendored): 70 changes
@@ -73,10 +73,11 @@ type recvMsg struct {
 }

 // recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
 type recvBuffer struct {
     c  chan recvMsg
     mu sync.Mutex
@@ -233,6 +234,7 @@ const (
 type Stream struct {
     id     uint32
     st     ServerTransport    // nil for client side Stream
+    ct     *http2Client       // nil for server side Stream
     ctx    context.Context    // the associated context of the stream
     cancel context.CancelFunc // always nil for client side Stream
     done   chan struct{}      // closed at the end of stream to unblock writers. On the client side.
@@ -251,6 +253,10 @@ type Stream struct {

     headerChan       chan struct{} // closed to indicate the end of header metadata.
     headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+    // headerValid indicates whether a valid header was received. Only
+    // meaningful after headerChan is closed (always call waitOnHeader() before
+    // reading its value). Not valid on server side.
+    headerValid bool

     // hdrMu protects header and trailer metadata on the server-side.
     hdrMu sync.Mutex
@@ -303,34 +309,28 @@ func (s *Stream) getState() streamState {
     return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }

-func (s *Stream) waitOnHeader() error {
+func (s *Stream) waitOnHeader() {
     if s.headerChan == nil {
         // On the server headerChan is always nil since a stream originates
         // only after having received headers.
-        return nil
+        return
     }
     select {
     case <-s.ctx.Done():
-        // We prefer success over failure when reading messages because we delay
-        // context error in stream.Read(). To keep behavior consistent, we also
-        // prefer success here.
-        select {
-        case <-s.headerChan:
-            return nil
-        default:
-        }
-        return ContextErr(s.ctx.Err())
+        // Close the stream to prevent headers/trailers from changing after
+        // this function returns.
+        s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+        // headerChan could possibly not be closed yet if closeStream raced
+        // with operateHeaders; wait until it is closed explicitly here.
+        <-s.headerChan
     case <-s.headerChan:
-        return nil
     }
 }

 // RecvCompress returns the compression algorithm applied to the inbound
 // message. It is empty string if there is no compression applied.
 func (s *Stream) RecvCompress() string {
-    if err := s.waitOnHeader(); err != nil {
-        return ""
-    }
+    s.waitOnHeader()
     return s.recvCompress
 }

@@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} {
 // available. It blocks until i) the metadata is ready or ii) there is no header
 // metadata or iii) the stream is canceled/expired.
 //
-// On server side, it returns the out header after t.WriteHeader is called.
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
 func (s *Stream) Header() (metadata.MD, error) {
-    if s.headerChan == nil && s.header != nil {
+    if s.headerChan == nil {
         // On server side, return the header in stream. It will be the out
         // header after t.WriteHeader is called.
         return s.header.Copy(), nil
     }
-    err := s.waitOnHeader()
-    // Even if the stream is closed, header is returned if available.
-    select {
-    case <-s.headerChan:
-        if s.header == nil {
-            return nil, nil
-        }
-        return s.header.Copy(), nil
-    default:
+    s.waitOnHeader()
+    if !s.headerValid {
+        return nil, s.status.Err()
     }
-    return nil, err
+    return s.header.Copy(), nil
 }

 // TrailersOnly blocks until a header or trailers-only frame is received and
 // then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. If a context error happens
-// first, returns it as a status error. Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
-    err := s.waitOnHeader()
-    if err != nil {
-        return false, err
-    }
-    return s.noHeaders, nil
+// before headers are received, returns true, nil. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+    s.waitOnHeader()
+    return s.noHeaders
 }

 // Trailer returns the cached trailer metedata. Note that if it is not called
@@ -534,6 +525,7 @@ type ServerConfig struct {
     ReadBufferSize    int
     ChannelzParentID  int64
     MaxHeaderListSize *uint32
+    HeaderTableSize   *uint32
 }

 // NewServerTransport creates a ServerTransport with conn or non-nil error
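The new HeaderTableSize field plumbs through to an HTTP/2 SETTINGS entry, as the newHTTP2Server hunk earlier shows. A sketch of writing that setting with golang.org/x/net/http2, mirroring the diff's isettings construction (the surrounding scenario is illustrative, not grpc's actual wiring):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	headerTableSize := uint32(4096) // would come from ServerConfig.HeaderTableSize

	var buf bytes.Buffer
	framer := http2.NewFramer(&buf, &buf)

	// Same shape as the diff: append a SettingHeaderTableSize entry and
	// emit everything in one SETTINGS frame.
	isettings := []http2.Setting{
		{ID: http2.SettingMaxFrameSize, Val: 16384},
		{ID: http2.SettingHeaderTableSize, Val: headerTableSize},
	}
	if err := framer.WriteSettings(isettings...); err != nil {
		fmt.Println("transport error:", err)
		return
	}
	fmt.Printf("wrote %d bytes of SETTINGS\n", buf.Len())
}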