Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
update vendor to latest kubernetes 1.14.0
Some of the Kubernetes-independent packages have been moved out of the tree into new projects.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: 3f35bfd4d7
Commit: f60a07ae82
vendor/google.golang.org/grpc/internal/binarylog/env_config.go | 6 (generated, vendored)

@@ -180,7 +180,7 @@ func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err
if s := match[1]; s != "" {
msgLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
}
return 0, msgLenStr, nil
}
@@ -195,13 +195,13 @@ func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err
if s := match[1]; s != "" {
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
}
}
if s := match[2]; s != "" {
msgLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
}
}
return hdrLenStr, msgLenStr, nil
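
Both hunks above only lowercase the error text. That matches common Go style guidance (error strings are usually not capitalized, since they tend to get wrapped into longer messages). A standalone sketch of the same pattern, not taken from the vendored file:

package main

import (
	"fmt"
	"strconv"
)

func parseLen(s string) (uint64, error) {
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		// lower-case error text wraps cleanly into a larger message
		return 0, fmt.Errorf("failed to convert %q to uint", s)
	}
	return v, nil
}

func main() {
	if _, err := parseLen("not-a-number"); err != nil {
		fmt.Println("parsing config:", err)
	}
}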
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go | 5 (generated, vendored)

@@ -377,10 +377,7 @@ func metadataKeyOmit(key string) bool {
case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
return false
}
if strings.HasPrefix(key, "grpc-") {
return true
}
return false
return strings.HasPrefix(key, "grpc-")
}

func mdToMetadataProto(md metadata.MD) *pb.Metadata {
vendor/google.golang.org/grpc/internal/channelz/funcs.go | 89 (generated, vendored)

@@ -40,7 +40,7 @@ var (
db dbWrapper
idGen idGenerator
// EntryPerPage defines the number of channelz entries to be shown on a web page.
EntryPerPage = 50
EntryPerPage = int64(50)
curState int32
maxTraceEntry = defaultMaxTraceEntry
)
@@ -113,20 +113,20 @@ func NewChannelzStorage() {
// boolean indicating whether there's more top channels to be queried for.
//
// The arg id specifies that only top channel with id at or above it will be included
// in the result. The returned slice is up to a length of EntryPerPage, and is
// sorted in ascending id order.
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
return db.get().GetTopChannels(id)
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
return db.get().GetTopChannels(id, maxResults)
}

// GetServers returns a slice of server's ServerMetric, along with a
// boolean indicating whether there's more servers to be queried for.
//
// The arg id specifies that only server with id at or above it will be included
// in the result. The returned slice is up to a length of EntryPerPage, and is
// sorted in ascending id order.
func GetServers(id int64) ([]*ServerMetric, bool) {
return db.get().GetServers(id)
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
return db.get().GetServers(id, maxResults)
}

// GetServerSockets returns a slice of server's (identified by id) normal socket's
@@ -134,10 +134,10 @@ func GetServers(id int64) ([]*ServerMetric, bool) {
// be queried for.
//
// The arg startID specifies that only sockets with id at or above it will be
// included in the result. The returned slice is up to a length of EntryPerPage,
// and is sorted in ascending id order.
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
return db.get().GetServerSockets(id, startID)
// included in the result. The returned slice is up to a length of the arg maxResults
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
return db.get().GetServerSockets(id, startID, maxResults)
}

// GetChannel returns the ChannelMetric for the channel (identified by id).
@@ -155,6 +155,11 @@ func GetSocket(id int64) *SocketMetric {
return db.get().GetSocket(id)
}

// GetServer returns the ServerMetric for the server (identified by id).
func GetServer(id int64) *ServerMetric {
return db.get().GetServer(id)
}

// RegisterChannel registers the given channel c in channelz database with ref
// as its reference name, and add it to the child list of its parent (identified
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
@@ -447,29 +452,32 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}

func min(a, b int) int {
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}

func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
c.mu.RLock()
l := len(c.topLevelChannels)
l := int64(len(c.topLevelChannels))
ids := make([]int64, 0, l)
cns := make([]*channel, 0, min(l, EntryPerPage))
cns := make([]*channel, 0, min(l, maxResults))

for k := range c.topLevelChannels {
ids = append(ids, k)
}
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
count := 0
count := int64(0)
var end bool
var t []*ChannelMetric
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if cn, ok := c.channels[v]; ok {
@@ -499,21 +507,24 @@ func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
return t, end
}

func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
c.mu.RLock()
l := len(c.servers)
l := int64(len(c.servers))
ids := make([]int64, 0, l)
ss := make([]*server, 0, min(l, EntryPerPage))
ss := make([]*server, 0, min(l, maxResults))
for k := range c.servers {
ids = append(ids, k)
}
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
count := 0
count := int64(0)
var end bool
var s []*ServerMetric
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if svr, ok := c.servers[v]; ok {
@@ -541,7 +552,10 @@ func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
return s, end
}

func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
var svr *server
var ok bool
c.mu.RLock()
@@ -551,18 +565,18 @@ func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric,
return nil, true
}
svrskts := svr.sockets
l := len(svrskts)
l := int64(len(svrskts))
ids := make([]int64, 0, l)
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
sks := make([]*normalSocket, 0, min(l, maxResults))
for k := range svrskts {
ids = append(ids, k)
}
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
count := 0
count := int64(0)
var end bool
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if ns, ok := c.normalSockets[v]; ok {
@@ -655,6 +669,23 @@ func (c *channelMap) GetSocket(id int64) *SocketMetric {
return nil
}

func (c *channelMap) GetServer(id int64) *ServerMetric {
sm := &ServerMetric{}
var svr *server
var ok bool
c.mu.RLock()
if svr, ok = c.servers[id]; !ok {
c.mu.RUnlock()
return nil
}
sm.ListenSockets = copyMap(svr.listenSockets)
c.mu.RUnlock()
sm.ID = svr.id
sm.RefName = svr.refName
sm.ServerData = svr.s.ChannelzMetric()
return sm
}

type idGenerator struct {
id int64
}
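
The doc comments above describe the new paging contract: results start at the given id, come back sorted in ascending id order, and are capped at maxResults, with a fallback to EntryPerPage when maxResults is zero (the guard added at the top of each method). The snippet below is a hypothetical caller, written as if it lived inside the gRPC module (channelz is an internal package), and only illustrates the changed signatures; it is not part of the vendored code.

package channelzcaller // hypothetical package inside google.golang.org/grpc

import "google.golang.org/grpc/internal/channelz"

func listSomeChannels() {
	// maxResults == 0 falls back to EntryPerPage, per the added guard.
	defaultPage, more := channelz.GetTopChannels(0, 0)

	// At most 10 entries whose id is at or above 100.
	smallPage, _ := channelz.GetTopChannels(100, 10)

	_, _, _ = defaultPage, smallPage, more
}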
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go | 4 (generated, vendored)

@@ -34,7 +34,7 @@ const (
type RequireHandshakeSetting int

const (
// RequireHandshakeHybrid (default, deprecated) indicates to wait for
// RequireHandshakeHybrid (default, deprecated) indicates to not wait for
// handshake before considering a connection ready, but wait before
// considering successful.
RequireHandshakeHybrid RequireHandshakeSetting = iota
@@ -59,6 +59,8 @@ var (
func init() {
switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
case "on":
fallthrough
default:
RequireHandshake = RequireHandshakeOn
case "off":
RequireHandshake = RequireHandshakeOff
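
With the added fallthrough and default arm, any value other than "off" now selects RequireHandshakeOn. Below is a self-contained sketch of that env-var pattern; the variable name and types are invented for illustration and are not what gRPC actually reads.

package main

import (
	"fmt"
	"os"
	"strings"
)

type handshakeSetting int

const (
	handshakeOn handshakeSetting = iota
	handshakeOff
)

func settingFromEnv() handshakeSetting {
	switch strings.ToLower(os.Getenv("EXAMPLE_REQUIRE_HANDSHAKE")) {
	case "on":
		fallthrough
	default: // anything other than "off" means "on"
		return handshakeOn
	case "off":
		return handshakeOff
	}
}

func main() {
	fmt.Println(settingFromEnv())
}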
vendor/google.golang.org/grpc/internal/internal.go | 21 (generated, vendored)

@@ -20,17 +20,28 @@
// symbols to avoid circular dependencies.
package internal

import "context"
import (
"context"
"time"
)

var (
// WithContextDialer is exported by clientconn.go
WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
// WithResolverBuilder is exported by clientconn.go
// WithResolverBuilder is exported by dialoptions.go
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
// WithHealthCheckFunc is not exported by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
HealthCheckFunc HealthChecker
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
)

// HealthChecker defines the signature of the client-side LB channel health checking function.
type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error

const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
CredsBundleModeFallback = "fallback"
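
The hunk above replaces the inline signature of HealthCheckFunc with the named HealthChecker type, so other packages can refer to the signature without repeating it or importing the implementation. A standalone sketch of a function that satisfies that signature; the names and the trivial body are invented for illustration:

package main

import "context"

// Same shape as the HealthChecker type introduced above.
type healthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error

// noopHealthCheck reports the service healthy and waits for cancellation.
func noopHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error {
	reportHealth(true)
	<-ctx.Done()
	return ctx.Err()
}

// Compile-time check that the function matches the signature.
var _ healthChecker = noopHealthCheck

func main() {}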
vendor/google.golang.org/grpc/internal/transport/http2_client.go | 21 (generated, vendored)

@@ -91,10 +91,10 @@ type http2Client struct {
maxSendHeaderListSize *uint32

bdpEst *bdpEstimator
// onSuccess is a callback that client transport calls upon
// onPrefaceReceipt is a callback that client transport calls upon
// receiving server preface to signal that a succefull HTTP2
// connection was established.
onSuccess func()
onPrefaceReceipt func()

maxConcurrentStreams uint32
streamQuota int64
@@ -145,7 +145,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -240,7 +240,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
kp: kp,
statsHandler: opts.StatsHandler,
initialWindowSize: initialWindowSize,
onSuccess: onSuccess,
onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
@@ -322,7 +322,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
}
}

t.framer.writer.Flush()
if err := t.framer.writer.Flush(); err != nil {
return nil, err
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
@@ -362,6 +364,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
ctx: s.ctx,
ctxDone: s.ctx.Done(),
recv: s.buf,
closeStream: func(err error) {
t.CloseStream(s, err)
},
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -414,7 +419,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if dl, ok := ctx.Deadline(); ok {
// Send out timeout regardless its value. The server can detect timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := dl.Sub(time.Now())
timeout := time.Until(dl)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
}
for k, v := range authData {
@@ -780,7 +785,7 @@ func (t *http2Client) Close() error {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
go t.onClose()
t.onClose()
return err
}

@@ -1210,7 +1215,7 @@ func (t *http2Client) reader() {
t.Close() // this kicks off resetTransport, so must be last before return
return
}
t.onSuccess()
t.onPrefaceReceipt()
t.handleSettings(sf, true)

// loop to keep reading incoming messages on this transport.
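
Besides the onSuccess to onPrefaceReceipt rename, the deadline handling switches from dl.Sub(time.Now()) to the equivalent time.Until(dl), available since Go 1.8. A small self-contained comparison, not part of the vendored file:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if dl, ok := ctx.Deadline(); ok {
		oldStyle := dl.Sub(time.Now()) // what the code used before
		newStyle := time.Until(dl)     // equivalent, preferred form
		fmt.Println(oldStyle, newStyle)
	}
}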
vendor/google.golang.org/grpc/internal/transport/http2_server.go | 81 (generated, vendored)

@@ -1004,45 +1004,74 @@ func (t *http2Server) Close() error {
return err
}

// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; !ok {
t.mu.Unlock()
return
}

delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
t.mu.Unlock()

if channelz.IsOn() {
if eosReceived {
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
} else {
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
}

// closeStream clears the footprint of a stream when the stream is not needed
// any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
if s.swapState(streamDone) == streamDone {
// If the stream was already done, return.
return
}
// Mark the stream as done
oldState := s.swapState(streamDone)

// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()

// Deletes the stream from active streams
t.deleteStream(s, eosReceived)

cleanup := &cleanupStream{
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {
t.mu.Lock()
if t.activeStreams != nil {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
}
t.mu.Unlock()
if channelz.IsOn() {
if eosReceived {
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
} else {
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
},
onWrite: func() {},
}
if hdr != nil {
hdr.cleanup = cleanup
t.controlBuf.put(hdr)
} else {

// No trailer. Puts cleanupFrame into transport's control buffer.
if hdr == nil {
t.controlBuf.put(cleanup)
return
}

// We do the check here, because of the following scenario:
// 1. closeStream is called first with a trailer. A trailer item with a piggybacked cleanup item
// is put to control buffer.
// 2. Loopy writer is waiting on a stream quota. It will never get it because client errored at
// some point. So loopy can't act on trailer
// 3. Client sends a RST_STREAM due to the error. Then closeStream is called without a trailer as
// the result of the received RST_STREAM.
// If we do this check at the beginning of the closeStream, then we won't put a cleanup item in
// response to received RST_STREAM into the control buffer and outStream in loopy writer will
// never get cleaned up.

// If the stream is already done, don't send the trailer.
if oldState == streamDone {
return
}

hdr.cleanup = cleanup
t.controlBuf.put(hdr)
}

func (t *http2Server) RemoteAddr() net.Addr {
@@ -1155,7 +1184,7 @@ func (t *http2Server) IncrMsgRecv() {
}

func (t *http2Server) getOutFlowWindow() int64 {
resp := make(chan uint32)
resp := make(chan uint32, 1)
timer := time.NewTimer(time.Second)
defer timer.Stop()
t.controlBuf.put(&outFlowControlSizeRequest{resp})
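
In getOutFlowWindow the response channel gains a buffer of one. The point of that change: with an unbuffered channel, a responder that answers after the requester has already timed out would block forever; with capacity one it can hand off the value and exit. A standalone sketch of the pattern with invented names:

package main

import (
	"fmt"
	"time"
)

func request() (uint32, bool) {
	resp := make(chan uint32, 1) // buffered so the responder never blocks
	go func() {
		time.Sleep(50 * time.Millisecond) // slow responder
		resp <- 42                        // succeeds even if nobody is reading anymore
	}()

	timer := time.NewTimer(10 * time.Millisecond)
	defer timer.Stop()
	select {
	case v := <-resp:
		return v, true
	case <-timer.C:
		return 0, false // gave up, but the goroutine above still exits cleanly
	}
}

func main() {
	fmt.Println(request())
}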
vendor/google.golang.org/grpc/internal/transport/transport.go | 62 (generated, vendored)

@@ -110,15 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg {
return b.c
}

//
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
err error
closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
err error
}

// Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -128,31 +128,53 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
n, r.err = r.read(p)
return n, r.err
}

func (r *recvBufferReader) read(p []byte) (n int, err error) {
if r.last != nil && len(r.last) > 0 {
// Read remaining data left in last call.
copied := copy(p, r.last)
r.last = r.last[copied:]
return copied, nil
}
if r.closeStream != nil {
n, r.err = r.readClient(p)
} else {
n, r.err = r.read(p)
}
return n, r.err
}

func (r *recvBufferReader) read(p []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
r.recv.load()
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
return copied, nil
return r.readAdditional(m, p)
}
}

func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
select {
case <-r.ctxDone:
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, p)
case m := <-r.recv.get():
return r.readAdditional(m, p)
}
}

func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
return copied, nil
}

type streamState uint32

const (
@@ -511,8 +533,8 @@ type TargetInfo struct {

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
}

// Options provides additional hints and information for message
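
recvBufferReader.Read above serves any unread remainder of the previous message from r.last before pulling the next message from the buffer. The following standalone sketch shows that buffering pattern with simplified stand-in types; it is not the vendored implementation.

package main

import (
	"fmt"
	"io"
)

type msgReader struct {
	msgs <-chan []byte // incoming messages
	last []byte        // remainder of the previous message
}

func (r *msgReader) Read(p []byte) (int, error) {
	if len(r.last) > 0 {
		// Serve leftover bytes from the previous message first.
		n := copy(p, r.last)
		r.last = r.last[n:]
		return n, nil
	}
	m, ok := <-r.msgs
	if !ok {
		return 0, io.EOF
	}
	n := copy(p, m)
	r.last = m[n:]
	return n, nil
}

func main() {
	ch := make(chan []byte, 2)
	ch <- []byte("hello ")
	ch <- []byte("world")
	close(ch)
	b, _ := io.ReadAll(&msgReader{msgs: ch})
	fmt.Println(string(b))
}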