vendor update for E2E framework

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2019-05-31 15:15:11 +05:30
parent 9bb23e4e32
commit d300da19b7
2149 changed files with 598692 additions and 14107 deletions

View File

@ -138,6 +138,8 @@ type ClientConn interface {
ResolveNow(resolver.ResolveNowOption)
// Target returns the dial target for this ClientConn.
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string
}
@ -155,6 +157,10 @@ type BuildOptions struct {
Dialer func(context.Context, string) (net.Conn, error)
// ChannelzParentID is the entity parent's channelz unique identification number.
ChannelzParentID int64
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
// passed to the resolver.
// See the documentation for the resolver.Target type for details about what it contains.
Target resolver.Target
}
// Builder creates a balancer.
@ -171,9 +177,6 @@ type PickOptions struct {
// FullMethodName is the method name that NewClientStream() is called
// with. The canonical format is /service/Method.
FullMethodName string
// Header contains the metadata from the RPC's client header. The metadata
// should not be modified; make a copy first if needed.
Header metadata.MD
}
// DoneInfo contains additional information for done.
@ -186,6 +189,11 @@ type DoneInfo struct {
BytesSent bool
// BytesReceived indicates if any byte has been received from the server.
BytesReceived bool
// ServerLoad is the load received from the server. It's usually sent as part of
// trailing metadata.
//
// The only supported type now is *orca_v1.LoadReport.
ServerLoad interface{}
}
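
A sketch (not part of this diff) of how a picker's done callback might consume the new ServerLoad field; the loadAwarePicker type and its load handling are hypothetical:

import (
	"context"

	"google.golang.org/grpc/balancer"
)

// loadAwarePicker always returns one ready SubConn and inspects server load.
type loadAwarePicker struct {
	sc balancer.SubConn
}

func (p *loadAwarePicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	done := func(di balancer.DoneInfo) {
		if di.ServerLoad != nil {
			// Per the comment above, type-assert to *orca_v1.LoadReport
			// here and feed it into load-tracking state.
		}
	}
	return p.sc, done, nil
}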
var (
@ -215,8 +223,10 @@ type Picker interface {
//
// If a SubConn is returned:
// - If it is READY, gRPC will send the RPC on it;
// - If it is not ready, or becomes not ready after it's returned, gRPC will block
// until UpdateBalancerState() is called and will call pick on the new picker.
// - If it is not ready, or becomes not ready after it's returned, gRPC will
// block until UpdateBalancerState() is called and will call pick on the
// new picker. The done function returned from Pick(), if not nil, will be
// called with nil error, no bytes sent and no bytes received.
//
// If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
@ -249,18 +259,46 @@ type Balancer interface {
// that back to gRPC.
// Balancer should also generate and update Pickers when its internal state has
// been changed by the new state.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateSubConnState will be called instead.
HandleSubConnStateChange(sc SubConn, state connectivity.State)
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
// balancers.
// Balancer can create new SubConn or remove SubConn with the addresses.
// An empty address slice and a non-nil error will be passed if the resolver returns
// non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateResolverState will be called instead.
HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
// SubConnState describes the state of a SubConn.
type SubConnState struct {
ConnectivityState connectivity.State
// TODO: add last connection error
}
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateResolverState method will be called instead
// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
// HandleSubConnStateChange.
type V2Balancer interface {
// UpdateResolverState is called by gRPC when the state of the resolver
// changes.
UpdateResolverState(resolver.State)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
//
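
A minimal sketch of a balancer opting into the V2 callbacks (the stubBalancer name and trivial method bodies are illustrative; the panics on the legacy methods mirror what base.baseBalancer does in this commit):

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// stubBalancer implements both Balancer and V2Balancer; gRPC detects
// V2Balancer with a type assertion and then calls only the V2 methods.
type stubBalancer struct {
	cc balancer.ClientConn
}

func (b *stubBalancer) HandleResolvedAddrs([]resolver.Address, error) { panic("not implemented") }

func (b *stubBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {
	panic("not implemented")
}

func (b *stubBalancer) UpdateResolverState(s resolver.State) {
	// Create or remove SubConns from s.Addresses via b.cc here.
}

func (b *stubBalancer) UpdateSubConnState(sc balancer.SubConn, st balancer.SubConnState) {
	// React to st.ConnectivityState and regenerate the picker here.
}

func (b *stubBalancer) Close() {}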

View File

@ -67,14 +67,16 @@ type baseBalancer struct {
}
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
return
}
grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
panic("not implemented")
}
func (b *baseBalancer) UpdateResolverState(s resolver.State) {
// TODO: handle s.Err (log if not nil) once implemented.
// TODO: handle s.ServiceConfig?
grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
for _, a := range addrs {
for _, a := range s.Addresses {
addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns).
@ -120,6 +122,11 @@ func (b *baseBalancer) regeneratePicker() {
}
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
panic("not implemented")
}
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
oldS, ok := b.scStates[sc]
if !ok {

View File

@ -82,20 +82,13 @@ func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
return b.c
}
// resolverUpdate contains the new resolved addresses or error if there's
// any.
type resolverUpdate struct {
addrs []resolver.Address
err error
}
// ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface.
type ccBalancerWrapper struct {
cc *ClientConn
balancer balancer.Balancer
stateChangeQueue *scStateUpdateBuffer
resolverUpdateCh chan *resolverUpdate
resolverUpdateCh chan *resolver.State
done chan struct{}
mu sync.Mutex
@ -106,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
ccb := &ccBalancerWrapper{
cc: cc,
stateChangeQueue: newSCStateUpdateBuffer(),
resolverUpdateCh: make(chan *resolverUpdate, 1),
resolverUpdateCh: make(chan *resolver.State, 1),
done: make(chan struct{}),
subConns: make(map[*acBalancerWrapper]struct{}),
}
@ -128,15 +121,23 @@ func (ccb *ccBalancerWrapper) watcher() {
return
default:
}
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
case t := <-ccb.resolverUpdateCh:
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
} else {
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
}
case s := <-ccb.resolverUpdateCh:
select {
case <-ccb.done:
ccb.balancer.Close()
return
default:
}
ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateResolverState(*s)
} else {
ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
}
case <-ccb.done:
}
@ -177,37 +178,23 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
})
}
func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
if ccb.cc.curBalancerName != grpclbName {
var containsGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
containsGRPCLB = true
break
// Filter any grpclb addresses since we don't have the grpclb balancer.
for i := 0; i < len(s.Addresses); {
if s.Addresses[i].Type == resolver.GRPCLB {
copy(s.Addresses[i:], s.Addresses[i+1:])
s.Addresses = s.Addresses[:len(s.Addresses)-1]
continue
}
}
if containsGRPCLB {
// The current balancer is not grpclb, but addresses contain grpclb
// address. This means we failed to switch to grpclb, most likely
// because grpclb is not registered. Filter out all grpclb addresses
// from addrs before sending to balancer.
tempAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
if a.Type != resolver.GRPCLB {
tempAddrs = append(tempAddrs, a)
}
}
addrs = tempAddrs
i++
}
}
select {
case <-ccb.resolverUpdateCh:
default:
}
ccb.resolverUpdateCh <- &resolverUpdate{
addrs: addrs,
err: err,
}
ccb.resolverUpdateCh <- &s
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {

View File

@ -20,7 +20,6 @@ package grpc
import (
"context"
"strings"
"sync"
"google.golang.org/grpc/balancer"
@ -34,13 +33,7 @@ type balancerWrapperBuilder struct {
}
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
targetAddr := cc.Target()
targetSplitted := strings.Split(targetAddr, ":///")
if len(targetSplitted) >= 2 {
targetAddr = targetSplitted[1]
}
bwb.b.Start(targetAddr, BalancerConfig{
bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
DialCreds: opts.DialCreds,
Dialer: opts.Dialer,
})
@ -49,7 +42,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
balancer: bwb.b,
pickfirst: pickfirst,
cc: cc,
targetAddr: targetAddr,
targetAddr: opts.Target.Endpoint,
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
@ -120,7 +113,7 @@ func (bw *balancerWrapper) lbWatcher() {
}
for addrs := range notifyCh {
grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
if bw.pickfirst {
var (
oldA resolver.Address

View File

@ -42,7 +42,6 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
@ -69,11 +68,9 @@ var (
errConnClosing = errors.New("grpc: the connection is closing")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// We use an accessor so that minConnectTimeout can be
// atomically read and updated while testing.
getMinConnectTimeout = func() time.Duration {
return minConnectTimeout
}
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
)
// The following errors are returned from Dial and DialContext
@ -140,6 +137,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
opt.apply(&cc.dopts)
}
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
defer func() {
if err != nil {
cc.Close()
}
}()
if channelz.IsOn() {
if cc.dopts.channelzParentID != 0 {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
@ -179,6 +185,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
if err != nil {
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
}
cc.dopts.defaultServiceConfig = sc
}
cc.mkp = cc.dopts.copts.KeepaliveParams
if cc.dopts.copts.Dialer == nil {
@ -201,17 +214,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
defer cancel()
}
defer func() {
select {
case <-ctx.Done():
conn, err = nil, ctx.Err()
default:
}
if err != nil {
cc.Close()
}
}()
scSet := false
@ -220,7 +228,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
cc.sc = &sc
scSet = true
}
default:
@ -266,7 +274,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
cc.sc = &sc
}
case <-ctx.Done():
return nil, ctx.Err()
@ -285,6 +293,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
}
// Build the resolver.
@ -322,6 +331,68 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return cc, nil
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
func chainUnaryClientInterceptors(cc *ClientConn) {
interceptors := cc.dopts.chainUnaryInts
// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
// be executed before any other chained interceptors.
if cc.dopts.unaryInt != nil {
interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
}
var chainedInt UnaryClientInterceptor
if len(interceptors) == 0 {
chainedInt = nil
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
}
}
cc.dopts.unaryInt = chainedInt
}
// getChainUnaryInvoker recursively generates the chained unary invoker.
func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
if curr == len(interceptors)-1 {
return finalInvoker
}
return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
}
}
// chainStreamClientInterceptors chains all stream client interceptors into one.
func chainStreamClientInterceptors(cc *ClientConn) {
interceptors := cc.dopts.chainStreamInts
// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
// be executed before any other chained interceptors.
if cc.dopts.streamInt != nil {
interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
}
var chainedInt StreamClientInterceptor
if len(interceptors) == 0 {
chainedInt = nil
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
}
}
cc.dopts.streamInt = chainedInt
}
// getChainStreamer recursively generates the chained client stream constructor.
func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
if curr == len(interceptors)-1 {
return finalStreamer
}
return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
}
}
// connectivityStateManager keeps the connectivity.State of ClientConn.
// This struct will eventually be exported so the balancers can access it.
type connectivityStateManager struct {
@ -388,14 +459,11 @@ type ClientConn struct {
mu sync.RWMutex
resolverWrapper *ccResolverWrapper
sc ServiceConfig
scRaw string
sc *ServiceConfig
conns map[*addrConn]struct{}
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
curBalancerName string
preBalancerName string // previous balancer name.
curAddresses []resolver.Address
balancerWrapper *ccBalancerWrapper
retryThrottler atomic.Value
@ -437,8 +505,7 @@ func (cc *ClientConn) scWatcher() {
cc.mu.Lock()
// TODO: load balance policy runtime change is ignored.
// We may revisit this decision in the future.
cc.sc = sc
cc.scRaw = ""
cc.sc = &sc
cc.mu.Unlock()
case <-cc.ctx.Done():
return
@ -465,59 +532,84 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
}
}
func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
// gRPC should resort to the default service config when:
// * the resolver service config is disabled
// * or the resolver does not return a service config or returns an invalid one.
func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool {
if cc.dopts.disableServiceConfig {
return true
}
// The logic below is temporary and will be removed once we change the resolver.State ServiceConfig field type.
// Right now, we assume that an empty service config string means the resolver did not return a config.
if sc == "" {
return true
}
// TODO: the logic below is temporary. Once we finish the logic to validate service config
// in resolver, we will replace the logic below.
_, err := parseServiceConfig(sc)
return err != nil
}
func (cc *ClientConn) updateResolverState(s resolver.State) error {
cc.mu.Lock()
defer cc.mu.Unlock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
if cc.conns == nil {
// cc was closed.
return
return nil
}
if reflect.DeepEqual(cc.curAddresses, addrs) {
return
if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) {
if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
}
} else {
// TODO: the parsing logic below will be moved inside resolver.
sc, err := parseServiceConfig(s.ServiceConfig)
if err != nil {
return err
}
if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig {
cc.applyServiceConfig(sc)
}
}
cc.curAddresses = addrs
cc.firstResolveEvent.Fire()
// update the service config that will be sent to the balancer.
if cc.sc != nil {
s.ServiceConfig = cc.sc.rawJSONString
}
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var isGRPCLB bool
for _, a := range addrs {
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
var newBalancerName string
// TODO: use new loadBalancerConfig field with appropriate priority.
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
// Address list doesn't contain grpclb address. Try to pick a
// non-grpclb balancer.
newBalancerName = cc.curBalancerName
// If current balancer is grpclb, switch to the previous one.
if newBalancerName == grpclbName {
newBalancerName = cc.preBalancerName
}
// The following could be true in two cases:
// - the first time handling resolved addresses
// (curBalancerName="")
// - the first time handling non-grpclb addresses
// (curBalancerName="grpclb", preBalancerName="")
if newBalancerName == "" {
newBalancerName = PickFirstBalancerName
}
newBalancerName = PickFirstBalancerName
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
// Balancer dial option was set, and this is the first time handling
// resolved addresses. Build a balancer with dopts.balancerBuilder.
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
cc.balancerWrapper.updateResolverState(s)
cc.firstResolveEvent.Fire()
return nil
}
// switchBalancer starts the switching from current balancer to the balancer
@ -529,10 +621,6 @@ func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
//
// Caller must hold cc.mu.
func (cc *ClientConn) switchBalancer(name string) {
if cc.conns == nil {
return
}
if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
return
}
@ -542,15 +630,11 @@ func (cc *ClientConn) switchBalancer(name string) {
grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
return
}
// TODO(bar switching) change this to two steps: drain and close.
// Keep track of sc in wrapper.
if cc.balancerWrapper != nil {
cc.balancerWrapper.close()
}
builder := balancer.Get(name)
// TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should
// we reuse previous one?
if channelz.IsOn() {
if builder == nil {
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
@ -569,7 +653,6 @@ func (cc *ClientConn) switchBalancer(name string) {
builder = newPickfirstBuilder()
}
cc.preBalancerName = cc.curBalancerName
cc.curBalancerName = builder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
}
@ -732,6 +815,9 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
if cc.sc == nil {
return MethodConfig{}
}
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
@ -743,14 +829,15 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
cc.mu.RLock()
defer cc.mu.RUnlock()
if cc.sc == nil {
return nil
}
return cc.sc.healthCheckConfig
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
hdr, _ := metadata.FromOutgoingContext(ctx)
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
FullMethodName: method,
Header: hdr,
})
if err != nil {
return nil, nil, toRPCErr(err)
@ -758,65 +845,25 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
return t, done, nil
}
// handleServiceConfig parses the service config string in JSON format to Go native
// struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
func (cc *ClientConn) handleServiceConfig(js string) error {
if cc.dopts.disableServiceConfig {
return nil
func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
if sc == nil {
// should never reach here.
return fmt.Errorf("got nil pointer for service config")
}
if cc.scRaw == js {
return nil
}
if channelz.IsOn() {
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
// The special formatting of \"%s\" instead of %q is to provide nice printing of service config
// for human consumption.
Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js),
Severity: channelz.CtINFO,
})
}
sc, err := parseServiceConfig(js)
if err != nil {
return err
}
cc.mu.Lock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
if cc.conns == nil {
cc.mu.Unlock()
return nil
}
cc.scRaw = js
cc.sc = sc
if sc.retryThrottling != nil {
if cc.sc.retryThrottling != nil {
newThrottler := &retryThrottler{
tokens: sc.retryThrottling.MaxTokens,
max: sc.retryThrottling.MaxTokens,
thresh: sc.retryThrottling.MaxTokens / 2,
ratio: sc.retryThrottling.TokenRatio,
tokens: cc.sc.retryThrottling.MaxTokens,
max: cc.sc.retryThrottling.MaxTokens,
thresh: cc.sc.retryThrottling.MaxTokens / 2,
ratio: cc.sc.retryThrottling.TokenRatio,
}
cc.retryThrottler.Store(newThrottler)
} else {
cc.retryThrottler.Store((*retryThrottler)(nil))
}
if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
if cc.curBalancerName == grpclbName {
// If current balancer is grpclb, there's at least one grpclb
// balancer address in the resolved list. Don't switch the balancer,
// but change the previous balancer name, so if a new resolved
// address list doesn't contain grpclb address, balancer will be
// switched to *sc.LB.
cc.preBalancerName = *sc.LB
} else {
cc.switchBalancer(*sc.LB)
cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
}
}
cc.mu.Unlock()
return nil
}
@ -892,7 +939,7 @@ func (cc *ClientConn) Close() error {
}
channelz.AddTraceEvent(cc.channelzID, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity beng deleted, and thus prevent it from being deleted right away.
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(cc.channelzID)
}
return nil
@ -921,8 +968,6 @@ type addrConn struct {
// Use updateConnectivityState for updating addrConn's connectivity state.
state connectivity.State
tearDownErr error // The reason this addrConn is torn down.
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@ -963,191 +1008,169 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
func (ac *addrConn) resetTransport() {
for i := 0; ; i++ {
tryNextAddrFromStart := grpcsync.NewEvent()
ac.mu.Lock()
if i > 0 {
ac.cc.resolveNow(resolver.ResolveNowOption{})
}
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
addrs := ac.addrs
backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
// This will be the duration that dial gets to finish.
dialDuration := getMinConnectTimeout()
dialDuration := minConnectTimeout
if ac.dopts.minConnectTimeout != nil {
dialDuration = ac.dopts.minConnectTimeout()
}
if dialDuration < backoffFor {
// Give dial more time as we keep failing to connect.
dialDuration = backoffFor
}
// We can potentially spend all the time trying the first address, and
// if the server accepts the connection and then hangs, the following
// addresses will never be tried.
//
// The spec doesn't mention what should be done for multiple addresses.
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
connectDeadline := time.Now().Add(dialDuration)
ac.mu.Unlock()
addrLoop:
for _, addr := range addrs {
newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
if err != nil {
// After exhausting all addresses, the addrConn enters
// TRANSIENT_FAILURE.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.updateConnectivityState(connectivity.TransientFailure)
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
ac.cc.mu.RUnlock()
// Backoff.
b := ac.resetBackoff
ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
timer := time.NewTimer(backoffFor)
select {
case <-timer.C:
ac.mu.Lock()
ac.backoffIdx++
ac.mu.Unlock()
case <-b:
timer.Stop()
case <-ac.ctx.Done():
timer.Stop()
return
}
copts := ac.dopts.copts
if ac.scopts.CredsBundle != nil {
copts.CredsBundle = ac.scopts.CredsBundle
}
hctx, hcancel := context.WithCancel(ac.ctx)
defer hcancel()
ac.mu.Unlock()
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
Severity: channelz.CtINFO,
})
}
reconnect := grpcsync.NewEvent()
prefaceReceived := make(chan struct{})
newTr, err := ac.createTransport(addr, copts, connectDeadline, reconnect, prefaceReceived)
if err == nil {
ac.mu.Lock()
ac.curAddr = addr
ac.transport = newTr
ac.mu.Unlock()
healthCheckConfig := ac.cc.healthCheckConfig()
// LB channel health checking is only enabled when all the four requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
// 3. a service config with non-empty healthCheckConfig field is provided,
// 4. the current load balancer allows it.
healthcheckManagingState := false
if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
if ac.cc.dopts.healthCheckFunc == nil {
// TODO: add a link to the health check doc in the error message.
grpclog.Error("the client side LB channel health check function has not been set.")
} else {
// TODO(deklerk) refactor to just return transport
go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
healthcheckManagingState = true
}
}
if !healthcheckManagingState {
ac.mu.Lock()
ac.updateConnectivityState(connectivity.Ready)
ac.mu.Unlock()
}
} else {
hcancel()
if err == errConnClosing {
return
}
if tryNextAddrFromStart.HasFired() {
break addrLoop
}
continue
}
backoffFor = 0
ac.mu.Lock()
reqHandshake := ac.dopts.reqHandshake
ac.mu.Unlock()
<-reconnect.Done()
hcancel()
if reqHandshake == envconfig.RequireHandshakeHybrid {
// In RequireHandshakeHybrid mode, we must check to see whether
// server preface has arrived yet to decide whether to start
// reconnecting at the top of the list (server preface received)
// or continue with the next addr in the list as if the
// connection were not successful (server preface not received).
select {
case <-prefaceReceived:
// We received a server preface - huzzah! We consider this
// a success and restart from the top of the addr list.
ac.mu.Lock()
ac.backoffIdx = 0
ac.mu.Unlock()
break addrLoop
default:
// Despite having set state to READY, in hybrid mode we
// consider this a failure and continue connecting at the
// next addr in the list.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
ac.updateConnectivityState(connectivity.TransientFailure)
ac.mu.Unlock()
if tryNextAddrFromStart.HasFired() {
break addrLoop
}
}
} else {
// In RequireHandshakeOn mode, we would have already waited for
// the server preface, so we consider this a success and restart
// from the top of the addr list. In RequireHandshakeOff mode,
// we don't care to wait for the server preface before
// considering this a success, so we also restart from the top
// of the addr list.
ac.mu.Lock()
ac.backoffIdx = 0
ac.mu.Unlock()
break addrLoop
}
continue
}
// After exhausting all addresses, or after need to reconnect after a
// READY, the addrConn enters TRANSIENT_FAILURE.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
newTr.Close()
ac.mu.Unlock()
return
}
ac.curAddr = addr
ac.transport = newTr
ac.backoffIdx = 0
healthCheckConfig := ac.cc.healthCheckConfig()
// LB channel health checking is only enabled when all the four requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
// 3. a service config with non-empty healthCheckConfig field is provided,
// 4. the current load balancer allows it.
hctx, hcancel := context.WithCancel(ac.ctx)
healthcheckManagingState := false
if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
if ac.cc.dopts.healthCheckFunc == nil {
// TODO: add a link to the health check doc in the error message.
grpclog.Error("the client side LB channel health check function has not been set.")
} else {
// TODO(deklerk) refactor to just return transport
go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
healthcheckManagingState = true
}
}
if !healthcheckManagingState {
ac.updateConnectivityState(connectivity.Ready)
}
ac.mu.Unlock()
// Block until the created transport is down. And when this happens,
// we restart from the top of the addr list.
<-reconnect.Done()
hcancel()
// Need to reconnect after a READY, the addrConn enters
// TRANSIENT_FAILURE.
//
// This will set addrConn to TRANSIENT_FAILURE for a very short period
// of time, and then move to CONNECTING. It seems reasonable to skip this, but
// READY-CONNECTING is not a valid transition.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
ac.updateConnectivityState(connectivity.TransientFailure)
// Backoff.
b := ac.resetBackoff
timer := time.NewTimer(backoffFor)
acctx := ac.ctx
ac.mu.Unlock()
select {
case <-timer.C:
ac.mu.Lock()
ac.backoffIdx++
ac.mu.Unlock()
case <-b:
timer.Stop()
case <-acctx.Done():
timer.Stop()
return
}
}
}
// createTransport creates a connection to one of the backends in addrs. It
// sets ac.transport in the success case, or it returns an error if it was
// unable to successfully create a transport.
//
// If waitForHandshake is enabled, it blocks until server preface arrives.
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time, reconnect *grpcsync.Event, prefaceReceived chan struct{}) (transport.ClientTransport, error) {
// tryAllAddrs tries to create a connection to each of the addresses, stopping at
// the first successful one. It returns the transport, the address, and an Event in
// the successful case. The Event fires when the returned transport disconnects.
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
for _, addr := range addrs {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return nil, resolver.Address{}, nil, errConnClosing
}
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
ac.cc.mu.RUnlock()
copts := ac.dopts.copts
if ac.scopts.CredsBundle != nil {
copts.CredsBundle = ac.scopts.CredsBundle
}
ac.mu.Unlock()
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
Severity: channelz.CtINFO,
})
}
newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
if err == nil {
return newTr, addr, reconnect, nil
}
ac.cc.blockingpicker.updateConnectionError(err)
}
// Couldn't connect to any address.
return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
}
// createTransport creates a connection to addr. It returns the transport and an
// Event in the successful case. The Event fires when the returned transport
// disconnects.
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
prefaceReceived := make(chan struct{})
onCloseCalled := make(chan struct{})
reconnect := grpcsync.NewEvent()
target := transport.TargetInfo{
Addr: addr.Addr,
@ -1155,8 +1178,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
Authority: ac.cc.authority,
}
prefaceTimer := time.NewTimer(time.Until(connectDeadline))
onGoAway := func(r transport.GoAwayReason) {
ac.mu.Lock()
ac.adjustParams(r)
@ -1166,13 +1187,11 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
onClose := func() {
close(onCloseCalled)
prefaceTimer.Stop()
reconnect.Fire()
}
onPrefaceReceipt := func() {
close(prefaceReceived)
prefaceTimer.Stop()
}
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
@ -1182,69 +1201,28 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
}
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
if err == nil {
if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
select {
case <-prefaceTimer.C:
// We didn't get the preface in time.
newTr.Close()
err = errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
case <-onCloseCalled:
// The transport has already closed - noop.
return nil, errors.New("connection closed")
}
} else if ac.dopts.reqHandshake == envconfig.RequireHandshakeHybrid {
go func() {
select {
case <-prefaceTimer.C:
// We didn't get the preface in time.
newTr.Close()
case <-prefaceReceived:
// We got the preface just in the nick of time - huzzah!
case <-onCloseCalled:
// The transport has already closed - noop.
}
}()
}
}
if err != nil {
// newTr is either nil, or closed.
ac.cc.blockingpicker.updateConnectionError(err)
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return nil, errConnClosing
}
ac.mu.Unlock()
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
return nil, err
return nil, nil, err
}
// Now there is a viable transport to be used, so set ac.transport to reflect the new viable transport.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
newTr.Close()
return nil, errConnClosing
if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
select {
case <-time.After(connectDeadline.Sub(time.Now())):
// We didn't get the preface in time.
newTr.Close()
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
return nil, nil, errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
case <-onCloseCalled:
// The transport has already closed - noop.
return nil, nil, errors.New("connection closed")
// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
}
}
ac.mu.Unlock()
// Now there is a viable transport to be used, so set ac.transport to reflect the new viable transport.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
newTr.Close()
return nil, errConnClosing
}
ac.mu.Unlock()
return newTr, nil
return newTr, reconnect, nil
}
func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
@ -1332,7 +1310,6 @@ func (ac *addrConn) tearDown(err error) {
// between setting the state and logic that waits on context cancelation / etc.
ac.updateConnectivityState(connectivity.Shutdown)
ac.cancel()
ac.tearDownErr = err
ac.curAddr = resolver.Address{}
if err == errConnDrain && curTr != nil {
// GracefulClose(...) may be executed multiple times when

View File

@ -132,7 +132,8 @@ const (
// Unavailable indicates the service is currently unavailable.
// This is most likely a transient condition and may be corrected
// by retrying with a backoff.
// by retrying with a backoff. Note that it is not always safe to retry
// non-idempotent operations.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.

View File

@ -36,9 +36,6 @@ import (
"google.golang.org/grpc/credentials/internal"
)
// alpnProtoStr are the specified application level protocols for gRPC.
var alpnProtoStr = []string{"h2"}
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
@ -208,10 +205,23 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}
const alpnProtoStrH2 = "h2"
func appendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{cloneTLSConfig(c)}
tc.config.NextProtos = alpnProtoStr
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
return tc
}
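
A short usage sketch of the new behavior (protocol list illustrative): protocols the caller already configured are preserved and "h2" is appended only if absent, where previously NextProtos was unconditionally overwritten with []string{"h2"}:

import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

func newCreds() credentials.TransportCredentials {
	cfg := &tls.Config{NextProtos: []string{"http/1.1"}}
	// NewTLS clones cfg; the clone ends up negotiating
	// ["http/1.1", "h2"] rather than just ["h2"].
	return credentials.NewTLS(cfg)
}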

View File

@ -39,8 +39,12 @@ import (
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
cp Compressor
dc Decompressor
bs backoff.Strategy
@ -55,13 +59,16 @@ type dialOptions struct {
// balancer, and also by WithBalancerName dial option.
balancerBuilder balancer.Builder
// This is to support grpclb.
resolverBuilder resolver.Builder
reqHandshake envconfig.RequireHandshakeSetting
channelzParentID int64
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
healthCheckFunc internal.HealthChecker
resolverBuilder resolver.Builder
reqHandshake envconfig.RequireHandshakeSetting
channelzParentID int64
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
healthCheckFunc internal.HealthChecker
minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
}
// DialOption configures how we set up the connection.
@ -411,6 +418,17 @@ func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
})
}
// WithChainUnaryInterceptor returns a DialOption that specifies the chained
// interceptor for unary RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithUnaryInterceptor will always be prepended to the chain.
func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
})
}
// WithStreamInterceptor returns a DialOption that specifies the interceptor for
// streaming RPCs.
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
@ -419,6 +437,17 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
})
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for streaming RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainStreamInts = append(o.chainStreamInts, interceptors...)
})
}
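
A usage sketch for the chaining options above (interceptor names and target address are illustrative, not from this commit): an interceptor set via WithUnaryInterceptor is prepended to the chain, so it runs outermost, followed by the chained interceptors in the order they were given.

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// tagged returns an interceptor that logs entry/exit around the invoker,
// making the nesting order visible.
func tagged(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("enter %s", name)
		defer log.Printf("exit %s", name)
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func dialWithChain() (*grpc.ClientConn, error) {
	// Each RPC logs: enter outer, enter mid, enter inner, then exits in reverse.
	return grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(tagged("outer")),
		grpc.WithChainUnaryInterceptor(tagged("mid"), tagged("inner")),
	)
}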
// WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header. This value only works with WithInsecure and has no
// effect if TransportCredentials are present.
@ -437,15 +466,30 @@ func WithChannelzParentID(id int64) DialOption {
})
}
// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
// service config provided by the resolver and provides a hint to the resolver
// to not fetch service configs.
//
// Note that this dial option only disables the service config from the resolver. If
// a default service config is provided, gRPC will use that default service config.
func WithDisableServiceConfig() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableServiceConfig = true
})
}
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
// 1. WithDisableServiceConfig is called.
// 2. The resolver does not return a service config, or returns an invalid one.
//
// This API is EXPERIMENTAL.
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
})
}
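
A usage sketch for WithDefaultServiceConfig (target and policy illustrative); the JSON uses the standard service config schema and only takes effect when the resolver's config is absent, invalid, or disabled:

import "google.golang.org/grpc"

func dialWithDefaultConfig() (*grpc.ClientConn, error) {
	return grpc.Dial("dns:///example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy": "round_robin"}`),
	)
}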
// WithDisableRetry returns a DialOption that disables retries, even if the
// service config enables them. This does not impact transparent retries, which
// will happen automatically if no data is written to the wire or if the RPC is
@ -470,7 +514,8 @@ func WithMaxHeaderListSize(s uint32) DialOption {
})
}
// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn.
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
// This API is EXPERIMENTAL.
func WithDisableHealthCheck() DialOption {
@ -479,8 +524,8 @@ func WithDisableHealthCheck() DialOption {
})
}
// withHealthCheckFunc replaces the default health check function with the provided one. It makes
// tests easier to change the health check function.
// withHealthCheckFunc replaces the default health check function with the
// provided one. It makes tests easier to change the health check function.
//
// For testing purpose only.
func withHealthCheckFunc(f internal.HealthChecker) DialOption {
@ -500,3 +545,14 @@ func defaultDialOptions() dialOptions {
},
}
}
// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
//
// For testing purpose only.
func withMinConnectDeadline(f func() time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.minConnectTimeout = f
})
}

View File

@ -102,10 +102,10 @@ func RegisterCodec(codec Codec) {
if codec == nil {
panic("cannot register a nil Codec")
}
contentSubtype := strings.ToLower(codec.Name())
if contentSubtype == "" {
panic("cannot register Codec with empty string result for String()")
if codec.Name() == "" {
panic("cannot register Codec with empty string result for Name()")
}
contentSubtype := strings.ToLower(codec.Name())
registeredCodecs[contentSubtype] = codec
}

View File

@ -18,7 +18,7 @@
// Package grpclog defines logging for grpc.
//
// All logs in transport package only go to verbose level 2.
// All logs in transport and grpclb packages only go to verbose level 2.
// All logs in other packages in grpc are logged in spite of the verbosity level.
//
// In the default logger,

View File

@ -0,0 +1,46 @@
/*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package balancerload defines APIs to parse server loads in trailers. The
// parsed loads are sent to balancers in DoneInfo.
package balancerload
import (
"google.golang.org/grpc/metadata"
)
// Parser converts loads from metadata into a concrete type.
type Parser interface {
// Parse parses loads from metadata.
Parse(md metadata.MD) interface{}
}
var parser Parser
// SetParser sets the load parser.
//
// Not mutex-protected; it should be called before any gRPC functions.
func SetParser(lr Parser) {
parser = lr
}
// Parse calls parser.Parse().
func Parse(md metadata.MD) interface{} {
if parser == nil {
return nil
}
return parser.Parse(md)
}
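
A minimal sketch of a Parser implementation (the "endpoint-load" trailer key, the float encoding, and the import path of this internal package are assumptions for illustration; a real implementation would decode an ORCA load report here):

import (
	"strconv"

	"google.golang.org/grpc/internal/balancerload" // assumed vendored path
	"google.golang.org/grpc/metadata"
)

// trailerLoadParser reads a hypothetical scalar load value from the trailers.
type trailerLoadParser struct{}

func (trailerLoadParser) Parse(md metadata.MD) interface{} {
	vals := md.Get("endpoint-load")
	if len(vals) == 0 {
		return nil
	}
	load, err := strconv.ParseFloat(vals[0], 64)
	if err != nil {
		return nil
	}
	return load
}

func init() {
	// Per SetParser's contract above: register before any gRPC functions run.
	balancerload.SetParser(trailerLoadParser{})
}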

View File

@ -24,6 +24,7 @@
package channelz
import (
"fmt"
"sort"
"sync"
"sync/atomic"
@ -95,9 +96,14 @@ func (d *dbWrapper) get() *channelMap {
// NewChannelzStorage initializes channelz data storage and id generator.
//
// This function returns a cleanup function to wait for all channelz state to be reset by the
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
// don't interfere with each other, i.e. a lingering goroutine from a previous test doing entity removal
// happens to remove an entity just registered by the new test, since the id space is the same.
//
// Note: This function is exported for testing purpose only. User should not call
// it in most cases.
func NewChannelzStorage() {
func NewChannelzStorage() (cleanup func() error) {
db.set(&channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
@ -107,6 +113,28 @@ func NewChannelzStorage() {
subChannels: make(map[int64]*subChannel),
})
idGen.reset()
return func() error {
var err error
cm := db.get()
if cm == nil {
return nil
}
for i := 0; i < 1000; i++ {
cm.mu.Lock()
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
cm.mu.Unlock()
// all things stored in the channelz map have been cleared.
return nil
}
cm.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
cm.mu.Lock()
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
cm.mu.Unlock()
return err
}
}
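
A test-usage sketch for the new cleanup return value (test name illustrative; the channelz import path is assumed to be the internal package):

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestWithChannelz(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer func() {
		// Fails the test if channelz entities are still registered after teardown.
		if err := czCleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... exercise code that registers and removes channelz entities ...
}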
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a

View File

@ -34,13 +34,9 @@ const (
type RequireHandshakeSetting int
const (
// RequireHandshakeHybrid (default, deprecated) indicates to not wait for
// handshake before considering a connection ready, but wait before
// considering successful.
RequireHandshakeHybrid RequireHandshakeSetting = iota
// RequireHandshakeOn (default after the 1.17 release) indicates to wait
// for handshake before considering a connection ready/successful.
RequireHandshakeOn
// RequireHandshakeOn indicates to wait for handshake before considering a
// connection ready/successful.
RequireHandshakeOn RequireHandshakeSetting = iota
// RequireHandshakeOff indicates to not wait for handshake before
// considering a connection ready/successful.
RequireHandshakeOff
@ -53,7 +49,7 @@ var (
// environment variable.
//
// Will be removed after the 1.18 release.
RequireHandshake RequireHandshakeSetting
RequireHandshake = RequireHandshakeOn
)
func init() {
@ -64,8 +60,5 @@ func init() {
RequireHandshake = RequireHandshakeOn
case "off":
RequireHandshake = RequireHandshakeOff
case "hybrid":
// Will be removed after the 1.17 release.
RequireHandshake = RequireHandshakeHybrid
}
}

View File

@ -22,18 +22,24 @@ package syscall
import (
"net"
"sync"
"time"
"google.golang.org/grpc/grpclog"
)
func init() {
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
var once sync.Once
func log() {
once.Do(func() {
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
})
}
// GetCPUTime returns how much CPU time has passed since the start of this process.
// It always returns 0 under non-linux or appengine environment.
func GetCPUTime() int64 {
log()
return 0
}
@ -42,22 +48,26 @@ type Rusage struct{}
// GetRusage is a no-op function under non-linux or appengine environment.
func GetRusage() (rusage *Rusage) {
log()
return nil
}
// CPUTimeDiff returns the differences in user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function for non-linux or appengine environment.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux or appengine environments.
// A negative return value indicates the operation is not supported.
func GetTCPUserTimeout(conn net.Conn) (int, error) {
log()
return -1, nil
}

View File

@ -63,9 +63,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
if _, ok := w.(http.Flusher); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
}
if _, ok := w.(http.CloseNotifier); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
}
st := &serverHandlerTransport{
rw: w,
@ -176,17 +173,11 @@ func (a strAddr) String() string { return string(a) }
// do runs fn in the ServeHTTP goroutine.
func (ht *serverHandlerTransport) do(fn func()) error {
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
select {
case <-ht.closedCh:
return ErrConnClosing
default:
select {
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
}
case ht.writes <- fn:
return nil
}
}
@ -237,7 +228,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
if ht.stats != nil {
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
close(ht.writes)
}
ht.Close()
return err
@ -315,19 +305,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ctx, cancel = context.WithCancel(ctx)
}
// requestOver is closed when either the request's context is done
// or the status has been written via WriteStatus.
// requestOver is closed when the status has been written via WriteStatus.
requestOver := make(chan struct{})
// clientGone receives a single value if peer is gone, either
// because the underlying connection is dead or because the
// peer sends an http2 RST_STREAM.
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
go func() {
select {
case <-requestOver:
case <-ht.closedCh:
case <-clientGone:
case <-ht.req.Context().Done():
}
cancel()
ht.Close()
@ -407,10 +391,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
func (ht *serverHandlerTransport) runStream() {
for {
select {
case fn, ok := <-ht.writes:
if !ok {
return
}
case fn := <-ht.writes:
fn()
case <-ht.closedCh:
return

View File

@ -794,21 +794,21 @@ func (t *http2Client) Close() error {
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() error {
func (t *http2Client) GracefulClose() {
t.mu.Lock()
// Make sure we move to draining only from active.
if t.state == draining || t.state == closing {
t.mu.Unlock()
return nil
return
}
t.state = draining
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
return t.Close()
t.Close()
return
}
t.controlBuf.put(&incomingGoAway{})
return nil
}
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
@ -1140,15 +1140,27 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok {
return
}
endStream := frame.StreamEnded()
atomic.StoreUint32(&s.bytesReceived, 1)
var state decodeState
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
// Something wrong. Stops reading even when there is remaining.
initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0
if !initialHeader && !endStream {
// As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear
// at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
return
}
state := &decodeState{}
// Initialize the isGRPC value to !initialHeader: if a gRPC ResponseHeader has already been
// received, the peer is speaking gRPC and we are in gRPC mode.
state.data.isGRPC = !initialHeader
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
return
}
endStream := frame.StreamEnded()
var isHeader bool
defer func() {
if t.statsHandler != nil {
@ -1167,29 +1179,30 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
}()
// If headers haven't been received yet.
if atomic.SwapUint32(&s.headerDone, 1) == 0 {
if initialHeader {
if !endStream {
// Headers frame is not actually a trailers-only frame.
// Headers frame is ResponseHeader.
isHeader = true
// These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this.
s.recvCompress = state.encoding
if len(state.mdata) > 0 {
s.header = state.mdata
s.recvCompress = state.data.encoding
if len(state.data.mdata) > 0 {
s.header = state.data.mdata
}
} else {
s.noHeaders = true
close(s.headerChan)
return
}
// Headers frame is Trailers-only.
s.noHeaders = true
close(s.headerChan)
}
if !endStream {
return
}
// if the client received END_STREAM from the server while the stream was still active, send RST_STREAM
rst := s.getState() == streamActive
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true)
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
}
// reader runs as a separate goroutine in charge of reading data from network
@ -1356,6 +1369,8 @@ func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
return &s
}
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
func (t *http2Client) IncrMsgSent() {
atomic.AddInt64(&t.czData.msgSent, 1)
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())

View File

@ -286,7 +286,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
streamID := frame.Header().StreamID
state := decodeState{serverSide: true}
state := &decodeState{
serverSide: true,
}
if err := state.decodeHeader(frame); err != nil {
if se, ok := status.FromError(err); ok {
t.controlBuf.put(&cleanupStream{
@ -305,16 +307,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
st: t,
buf: buf,
fc: &inFlow{limit: uint32(t.initialWindowSize)},
recvCompress: state.encoding,
method: state.method,
contentSubtype: state.contentSubtype,
recvCompress: state.data.encoding,
method: state.data.method,
contentSubtype: state.data.contentSubtype,
}
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
if state.data.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
@ -327,19 +329,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
if len(state.data.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
}
if state.statsTags != nil {
s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
if state.data.statsTags != nil {
s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
}
if state.statsTrace != nil {
s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
if state.data.statsTrace != nil {
s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
}
if t.inTapHandle != nil {
var err error
info := &tap.Info{
FullMethodName: state.method,
FullMethodName: state.data.method,
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
@ -435,7 +437,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
if s != nil {
t.closeStream(s, true, se.Code, nil, false)
t.closeStream(s, true, se.Code, false)
} else {
t.controlBuf.put(&cleanupStream{
streamID: se.StreamID,
@ -577,7 +579,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
@ -602,11 +604,18 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
s, ok := t.getStream(f)
if !ok {
// If the stream has not been deleted from the transport's active streams map, do a regular closeStream.
if s, ok := t.getStream(f); ok {
t.closeStream(s, false, 0, false)
return
}
t.closeStream(s, false, 0, nil, false)
// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
t.controlBuf.put(&cleanupStream{
streamID: f.Header().StreamID,
rst: false,
rstCode: 0,
onWrite: func() {},
})
}
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
@ -770,7 +779,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
if err != nil {
return err
}
t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
if t.stats != nil {
@ -834,10 +843,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
if err != nil {
return err
}
t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
t.closeStream(s, false, 0, trailingHeader, true)
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
if t.stats != nil {
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
@ -849,6 +860,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
if _, ok := err.(ConnectionError); ok {
return err
}
// TODO(mmukhi, dfawley): Make sure this is the right code to return.
return status.Errorf(codes.Internal, "transport: %v", err)
}
@ -1005,16 +1019,24 @@ func (t *http2Server) Close() error {
}
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; !ok {
t.mu.Unlock()
return
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
oldState = s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return oldState
}
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
}
t.mu.Unlock()
@ -1025,55 +1047,38 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
return oldState
}
// closeStream clears the footprint of a stream when the stream is not needed
// any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
// Mark the stream as done
oldState := s.swapState(streamDone)
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
oldState := t.deleteStream(s, eosReceived)
// If the stream is already closed, don't put the trailing header into controlbuf.
if oldState == streamDone {
return
}
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
// Deletes the stream from active streams
t.deleteStream(s, eosReceived)
cleanup := &cleanupStream{
hdr.cleanup = &cleanupStream{
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {},
}
// No trailer. Puts cleanupFrame into transport's control buffer.
if hdr == nil {
t.controlBuf.put(cleanup)
return
}
// We do the check here, because of the following scenario:
// 1. closeStream is called first with a trailer. A trailer item with a piggybacked cleanup item
// is put to control buffer.
// 2. Loopy writer is waiting on a stream quota. It will never get it because client errored at
// some point. So loopy can't act on trailer
// 3. Client sends a RST_STREAM due to the error. Then closeStream is called without a trailer as
// the result of the received RST_STREAM.
// If we do this check at the beginning of the closeStream, then we won't put a cleanup item in
// response to received RST_STREAM into the control buffer and outStream in loopy writer will
// never get cleaned up.
// If the stream is already done, don't send the trailer.
if oldState == streamDone {
return
}
hdr.cleanup = cleanup
t.controlBuf.put(hdr)
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {},
})
}
func (t *http2Server) RemoteAddr() net.Addr {
return t.remoteAddr
}

View File

@ -78,7 +78,8 @@ var (
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
httpStatusConvTab = map[int]codes.Code{
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
http.StatusBadRequest: codes.Internal,
// 401 Unauthorized - UNAUTHENTICATED.
@ -98,9 +99,7 @@ var (
}
)
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers are finished.
type decodeState struct {
type parsedHeaderData struct {
encoding string
// statusGen caches the stream status received from the trailer the server
// sent. Client side only. Do not access directly. After all trailers are
@ -120,8 +119,30 @@ type decodeState struct {
statsTags []byte
statsTrace []byte
contentSubtype string
// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
//
// We are in gRPC mode (peer speaking gRPC) if:
// * We are client side and have already received a HEADER frame that indicates a gRPC peer.
// * The header contains a valid content-type, i.e. a string that starts with "application/grpc".
// In gRPC mode we should handle errors specific to gRPC.
//
// Otherwise (i.e. the content-type string does not start with "application/grpc", or is absent),
// we are in HTTP fallback mode, and should handle errors specific to HTTP.
isGRPC bool
grpcErr error
httpErr error
contentTypeErr string
}
// decodeState configures decoding criteria and records the decoded data.
type decodeState struct {
// whether decoding on server side or not
serverSide bool
// Records the states during HPACK decoding. It is filled with the info parsed from the HTTP
// HEADERS frame once the decodeHeader function has returned.
data parsedHeaderData
}
// isReservedHeader checks whether hdr belongs to HTTP2 headers
@ -202,11 +223,11 @@ func contentType(contentSubtype string) string {
}
func (d *decodeState) status() *status.Status {
if d.statusGen == nil {
if d.data.statusGen == nil {
// No status-details were provided; generate status using code/msg.
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
}
return d.statusGen
return d.data.statusGen
}
const binHdrSuffix = "-bin"
@ -244,113 +265,146 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
if frame.Truncated {
return status.Error(codes.Internal, "peer header list size exceeded limit")
}
for _, hf := range frame.Fields {
if err := d.processHeaderField(hf); err != nil {
return err
d.processHeaderField(hf)
}
if d.data.isGRPC {
if d.data.grpcErr != nil {
return d.data.grpcErr
}
if d.serverSide {
return nil
}
if d.data.rawStatusCode == nil && d.data.statusGen == nil {
// The gRPC status doesn't exist. Set rawStatusCode to Unknown and
// return a nil error, so that if the stream has ended, this Unknown
// status is propagated to the user. Otherwise it is ignored, and the
// status from a later trailer that has the StreamEnded flag set is
// propagated instead.
code := int(codes.Unknown)
d.data.rawStatusCode = &code
}
}
if d.serverSide {
return nil
}
// If grpc status exists, no need to check further.
if d.rawStatusCode != nil || d.statusGen != nil {
return nil
// HTTP fallback mode
if d.data.httpErr != nil {
return d.data.httpErr
}
// If grpc status doesn't exist and http status doesn't exist,
// then it's a malformed header.
if d.httpStatus == nil {
return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
}
var (
code = codes.Internal // when header does not include HTTP status, return INTERNAL
ok bool
)
if *(d.httpStatus) != http.StatusOK {
code, ok := httpStatusConvTab[*(d.httpStatus)]
if d.data.httpStatus != nil {
code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
if !ok {
code = codes.Unknown
}
return status.Error(code, http.StatusText(*(d.httpStatus)))
}
// gRPC status doesn't exist and http status is OK.
// Set rawStatusCode to be unknown and return nil error.
// So that, if the stream has ended this Unknown status
// will be propagated to the user.
// Otherwise, it will be ignored. In which case, status from
// a later trailer, that has StreamEnded flag set, is propagated.
code := int(codes.Unknown)
d.rawStatusCode = &code
return nil
return status.Error(code, d.constructHTTPErrMsg())
}
// constructHTTPErrMsg constructs the error message to be returned in HTTP fallback mode.
// Format: HTTP status code and its corresponding message + content-type error message.
func (d *decodeState) constructHTTPErrMsg() string {
var errMsgs []string
if d.data.httpStatus == nil {
errMsgs = append(errMsgs, "malformed header: missing HTTP status")
} else {
errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
}
if d.data.contentTypeErr == "" {
errMsgs = append(errMsgs, "transport: missing content-type field")
} else {
errMsgs = append(errMsgs, d.data.contentTypeErr)
}
return strings.Join(errMsgs, "; ")
}
func (d *decodeState) addMetadata(k, v string) {
if d.mdata == nil {
d.mdata = make(map[string][]string)
if d.data.mdata == nil {
d.data.mdata = make(map[string][]string)
}
d.mdata[k] = append(d.mdata[k], v)
d.data.mdata[k] = append(d.data.mdata[k], v)
}
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
switch f.Name {
case "content-type":
contentSubtype, validContentType := contentSubtype(f.Value)
if !validContentType {
return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
return
}
d.contentSubtype = contentSubtype
d.data.contentSubtype = contentSubtype
// TODO: do we want to propagate the whole content-type in the metadata,
// or come up with a way to just propagate the content-subtype if it was set?
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
// in the metadata?
d.addMetadata(f.Name, f.Value)
d.data.isGRPC = true
case "grpc-encoding":
d.encoding = f.Value
d.data.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
return
}
d.rawStatusCode = &code
d.data.rawStatusCode = &code
case "grpc-message":
d.rawStatusMsg = decodeGrpcMessage(f.Value)
d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
case "grpc-status-details-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
return
}
s := &spb.Status{}
if err := proto.Unmarshal(v, s); err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
return
}
d.statusGen = status.FromProto(s)
d.data.statusGen = status.FromProto(s)
case "grpc-timeout":
d.timeoutSet = true
d.data.timeoutSet = true
var err error
if d.timeout, err = decodeTimeout(f.Value); err != nil {
return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
}
case ":path":
d.method = f.Value
d.data.method = f.Value
case ":status":
code, err := strconv.Atoi(f.Value)
if err != nil {
return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
return
}
d.httpStatus = &code
d.data.httpStatus = &code
case "grpc-tags-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
return
}
d.statsTags = v
d.data.statsTags = v
d.addMetadata(f.Name, string(v))
case "grpc-trace-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
return
}
d.statsTrace = v
d.data.statsTrace = v
d.addMetadata(f.Name, string(v))
default:
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
@ -359,11 +413,10 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
v, err := decodeMetadataHeader(f.Name, f.Value)
if err != nil {
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
return nil
return
}
d.addMetadata(f.Name, v)
}
return nil
}
type timeoutUnit uint8

View File

@ -327,8 +327,7 @@ func (s *Stream) TrailersOnly() (bool, error) {
if err != nil {
return false, err
}
// if !headerDone, some other connection error occurred.
return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
return s.noHeaders, nil
}
// Trailer returns the cached trailer metadata. Note that if it is not called
@ -579,9 +578,12 @@ type ClientTransport interface {
// is called only once.
Close() error
// GracefulClose starts to tear down the transport. It stops accepting
// new RPCs and wait the completion of the pending RPCs.
GracefulClose() error
// GracefulClose starts to tear down the transport: the transport will stop
// accepting new RPCs, and NewStream will return an error. Once all streams are
// finished, the transport will close.
//
// It does not block.
GracefulClose()
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
@ -611,6 +613,9 @@ type ClientTransport interface {
// GetGoAwayReason returns the reason why GoAway frame was received.
GetGoAwayReason() GoAwayReason
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
// IncrMsgSent increments the number of messages sent through this transport.
IncrMsgSent()

View File

@ -120,6 +120,14 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
bp.mu.Unlock()
select {
case <-ctx.Done():
if connectionErr := bp.connectionError(); connectionErr != nil {
switch ctx.Err() {
case context.DeadlineExceeded:
return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
case context.Canceled:
return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
}
}
return nil, nil, ctx.Err()
case <-ch:
}
@ -165,6 +173,11 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
}
return t, done, nil
}
if done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
// DoneInfo with default value works.
done(balancer.DoneInfo{})
}
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
// A valid picker always returns READY subConn. This means the state of ac

64 vendor/google.golang.org/grpc/preloader.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
// This API is EXPERIMENTAL.
type PreparedMsg struct {
// Fields for the prepared message, populated by Encode before sending.
encodedData []byte
hdr []byte
payload []byte
}
// Encode marshals and compresses the message using the codec and compressor for the stream.
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
ctx := s.Context()
rpcInfo, ok := rpcInfoFromContext(ctx)
if !ok {
return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
}
// check if the context has the relevant information to prepareMsg
if rpcInfo.preloaderInfo == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
}
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
// prepare the msg
data, err := encode(rpcInfo.preloaderInfo.codec, msg)
if err != nil {
return err
}
p.encodedData = data
compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
if err != nil {
return err
}
p.hdr, p.payload = msgHeader(data, compData)
return nil
}
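A hedged usage sketch (not part of the diff): because SendMsg recognizes a *PreparedMsg (see the prepareMsg helper later in this diff), a message can be encoded once and the prepared bytes reused on subsequent sends. sendPrepared is an illustrative name:
import "google.golang.org/grpc"

// sendPrepared marshals and compresses msg once, then sends the
// prepared bytes on the stream.
func sendPrepared(stream grpc.ClientStream, msg interface{}) error {
	var pm grpc.PreparedMsg
	if err := pm.Encode(stream, msg); err != nil {
		return err
	}
	return stream.SendMsg(&pm)
}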

View File

@ -47,6 +47,8 @@ const (
defaultFreq = time.Minute * 30
defaultDNSSvrPort = "53"
golang = "GO"
// txtPrefix is the prefix string to be prepended to the host name for TXT record lookup.
txtPrefix = "_grpc_config."
// In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config.
txtAttribute = "grpc_config="
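A sketch of the lookup these constants describe, as if written in this package (the host value is illustrative):
// txtLookupName returns the DNS name queried for a service config TXT
// record; e.g. txtLookupName("example.com") == "_grpc_config.example.com".
// Matching TXT strings must begin with txtAttribute ("grpc_config=").
func txtLookupName(host string) string {
	return txtPrefix + host
}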
@ -64,6 +66,9 @@ var (
var (
defaultResolver netResolver = net.DefaultResolver
// To prevent excessive re-resolution, we enforce a rate limit on DNS
// resolution requests.
minDNSResRate = 30 * time.Second
)
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
@ -239,7 +244,13 @@ func (d *dnsResolver) watcher() {
return
case <-d.t.C:
case <-d.rn:
if !d.t.Stop() {
// Before resetting a timer, it should be stopped to prevent racing with
// reads on its channel.
<-d.t.C
}
}
result, sc := d.lookup()
// Next lookup should happen within an interval defined by d.freq. It may be
// more often due to exponential retry on empty address list.
@ -252,6 +263,16 @@ func (d *dnsResolver) watcher() {
}
d.cc.NewServiceConfig(sc)
d.cc.NewAddress(result)
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
// will be queued in d.rn.
t := time.NewTimer(minDNSResRate)
select {
case <-t.C:
case <-d.ctx.Done():
t.Stop()
return
}
}
}
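The stop-and-drain step above follows the standard time.Timer reuse idiom. A minimal sketch, assuming (as in the watcher) that no other goroutine consumes the timer's channel:
import "time"

// resetTimer safely re-arms t for duration d. If Stop reports that the
// timer already fired, the stale value is drained from t.C first so a
// later receive cannot observe it.
func resetTimer(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		<-t.C
	}
	t.Reset(d)
}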
@ -282,7 +303,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
}
func (d *dnsResolver) lookupTXT() string {
ss, err := d.resolver.LookupTXT(d.ctx, d.host)
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
if err != nil {
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
return ""

View File

@ -45,7 +45,7 @@ type passthroughResolver struct {
}
func (r *passthroughResolver) start() {
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}

View File

@ -98,6 +98,15 @@ type BuildOption struct {
DisableServiceConfig bool
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
Addresses []Address // Resolved addresses for the target
ServiceConfig string // JSON representation of the service config
// TODO: add Err error
// TODO: add ParsedServiceConfig interface{}
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
@ -106,17 +115,38 @@ type BuildOption struct {
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
UpdateState(State)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
// NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
}
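A sketch of a resolver pushing one combined update through the new method (exampleResolver and push are hypothetical names; addrs and scJSON come from the resolver's own lookup):
import "google.golang.org/grpc/resolver"

type exampleResolver struct {
	cc resolver.ClientConn
}

func (r *exampleResolver) push(addrs []resolver.Address, scJSON string) {
	// A single UpdateState call replaces the deprecated NewAddress and
	// NewServiceConfig pair, delivering both pieces of state atomically.
	r.cc.UpdateState(resolver.State{
		Addresses:     addrs,
		ServiceConfig: scJSON,
	})
}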
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext by the user, and
// grpc passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
//
// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
// be the full target string. e.g. "foo.bar" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
//
// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
type Target struct {
Scheme string
Authority string

View File

@ -21,6 +21,7 @@ package grpc
import (
"fmt"
"strings"
"sync/atomic"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
@ -30,12 +31,12 @@ import (
// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements resolver.ClientConnection interface.
type ccResolverWrapper struct {
cc *ClientConn
resolver resolver.Resolver
addrCh chan []resolver.Address
scCh chan string
done chan struct{}
lastAddressesCount int
cc *ClientConn
resolver resolver.Resolver
addrCh chan []resolver.Address
scCh chan string
done uint32 // accessed atomically; set to 1 when closed.
curState resolver.State
}
// split2 returns the values from strings.SplitN(s, sep, 2).
@ -82,7 +83,6 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
cc: cc,
addrCh: make(chan []resolver.Address, 1),
scCh: make(chan string, 1),
done: make(chan struct{}),
}
var err error
@ -99,57 +99,67 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
func (ccr *ccResolverWrapper) close() {
ccr.resolver.Close()
close(ccr.done)
atomic.StoreUint32(&ccr.done, 1)
}
// NewAddress is called by the resolver implemenetion to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
select {
case <-ccr.done:
func (ccr *ccResolverWrapper) isDone() bool {
return atomic.LoadUint32(&ccr.done) == 1
}
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
if ccr.isDone() {
return
}
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
ccr.cc.updateResolverState(s)
ccr.curState = s
}
// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
if ccr.isDone() {
return
default:
}
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(addrs)
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
}
ccr.cc.handleResolvedAddrs(addrs, nil)
ccr.curState.Addresses = addrs
ccr.cc.updateResolverState(ccr.curState)
}
// NewServiceConfig is called by the resolver implemenetion to send service
// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
select {
case <-ccr.done:
if ccr.isDone() {
return
default:
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
ccr.cc.handleServiceConfig(sc)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
}
ccr.curState.ServiceConfig = sc
ccr.cc.updateResolverState(ccr.curState)
}
func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) {
if len(addrs) == 0 && ccr.lastAddressesCount != 0 {
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: "Resolver returns an empty address list",
Severity: channelz.CtWarning,
})
} else if len(addrs) != 0 && ccr.lastAddressesCount == 0 {
var s string
for i, a := range addrs {
if a.ServerName != "" {
s += a.Addr + "(" + a.ServerName + ")"
} else {
s += a.Addr
}
if i != len(addrs)-1 {
s += " "
}
}
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s),
Severity: channelz.CtINFO,
})
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
return
}
ccr.lastAddressesCount = len(addrs)
var updates []string
if s.ServiceConfig != ccr.curState.ServiceConfig {
updates = append(updates, "service config updated")
}
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
updates = append(updates, "resolver returned an empty address list")
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
Severity: channelz.CtINFO,
})
}

View File

@ -694,14 +694,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return nil
}
// Information about RPC
type rpcInfo struct {
failfast bool
failfast bool
preloaderInfo *compressorInfo
}
// compressorInfo stores the codec and compressors for a stream.
// If a stream's context carries an rpcInfo whose preloaderInfo has non-nil
// pointers to the codec and compressors, PreparedMsg can be used for async
// message preparation and the marshalled bytes can be reused.
type compressorInfo struct {
codec baseCodec
cp Compressor
comp encoding.Compressor
}
type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
preloaderInfo: &compressorInfo{
codec: codec,
cp: cp,
comp: comp,
},
})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {

View File

@ -86,7 +86,7 @@ type service struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts options
opts serverOptions
mu sync.Mutex // guards following
lis map[net.Listener]bool
@ -108,7 +108,7 @@ type Server struct {
czData *channelzData
}
type options struct {
type serverOptions struct {
creds credentials.TransportCredentials
codec baseCodec
cp Compressor
@ -131,7 +131,7 @@ type options struct {
maxHeaderListSize *uint32
}
var defaultServerOptions = options{
var defaultServerOptions = serverOptions{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
@ -140,7 +140,33 @@ var defaultServerOptions = options{
}
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)
type ServerOption interface {
apply(*serverOptions)
}
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// This API is EXPERIMENTAL.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
f func(*serverOptions)
}
func (fdo *funcServerOption) apply(do *serverOptions) {
fdo.f(do)
}
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
return &funcServerOption{
f: f,
}
}
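A hedged sketch of the embedding the EmptyServerOption comment describes (myOption and limit are hypothetical): a package outside grpc can satisfy ServerOption without access to the unexported serverOptions struct:
import "google.golang.org/grpc"

// myOption is accepted anywhere a grpc.ServerOption is, but changes
// nothing in grpc itself (EmptyServerOption's apply is a no-op); a
// wrapping framework can type-assert for it and read limit in its own
// constructor.
type myOption struct {
	grpc.EmptyServerOption
	limit int
}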
// WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
@ -148,9 +174,9 @@ type ServerOption func(*options)
// Zero will disable the write buffer such that each write will be on underlying connection.
// Note: A Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.writeBufferSize = s
}
})
}
// ReadBufferSize lets you set the size of the read buffer; this determines how much data can be read at most
@ -159,25 +185,25 @@ func WriteBufferSize(s int) ServerOption {
// Zero will disable read buffer for a connection so data framer can access the underlying
// conn directly.
func ReadBufferSize(s int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s
}
})
}
// InitialWindowSize returns a ServerOption that sets window size for stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.initialWindowSize = s
}
})
}
// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.initialConnWindowSize = s
}
})
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
@ -187,25 +213,25 @@ func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
kp.Time = time.Second
}
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.keepaliveParams = kp
}
})
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.keepalivePolicy = kep
}
})
}
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
}
})
}
// RPCCompressor returns a ServerOption that sets a compressor for outbound
@ -216,9 +242,9 @@ func CustomCodec(codec Codec) ServerOption {
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.cp = cp
}
})
}
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
@ -227,9 +253,9 @@ func RPCCompressor(cp Compressor) ServerOption {
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.dc = dc
}
})
}
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
@ -243,73 +269,73 @@ func MaxMsgSize(m int) ServerOption {
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxReceiveMessageSize = m
}
})
}
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default `math.MaxInt32`.
func MaxSendMsgSize(m int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxSendMessageSize = m
}
})
}
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n
}
})
}
// Creds returns a ServerOption that sets credentials for server connections.
func Creds(c credentials.TransportCredentials) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.creds = c
}
})
}
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
// server. Only one unary interceptor can be installed. The construction of multiple
// interceptors (e.g., chaining) can be implemented at the caller.
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.unaryInt != nil {
panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
})
}
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
// server. Only one stream interceptor can be installed.
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.streamInt != nil {
panic("The stream server interceptor was already set and may not be reset.")
}
o.streamInt = i
}
})
}
// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed.
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.inTapHandle != nil {
panic("The tap handle was already set and may not be reset.")
}
o.inTapHandle = h
}
})
}
// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.statsHandler = h
}
})
}
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
@ -319,7 +345,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// The handling function has full access to the Context of the request and the
// stream, and the invocation bypasses interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.unknownStreamDesc = &StreamDesc{
StreamName: "unknown_service_handler",
Handler: streamHandler,
@ -327,7 +353,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
ClientStreams: true,
ServerStreams: true,
}
}
})
}
// ConnectionTimeout returns a ServerOption that sets the timeout for
@ -337,17 +363,17 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
//
// This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d
}
})
}
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxHeaderListSize = &s
}
})
}
// NewServer creates a gRPC server which has no service registered and has not
@ -355,7 +381,7 @@ func MaxHeaderListSize(s uint32) ServerOption {
func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions
for _, o := range opt {
o(&opts)
o.apply(&opts)
}
s := &Server{
lis: make(map[net.Listener]bool),
@ -614,12 +640,13 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
if err != nil {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
// If serverHandshake returns ErrConnDispatched, keep rawConn open.
// ErrConnDispatched means that the connection was dispatched away from
// gRPC; those connections should be left open.
if err != credentials.ErrConnDispatched {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
rawConn.Close()
}
rawConn.SetDeadline(time.Time{})
@ -748,10 +775,11 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
trInfo = &traceInfo{
tr: tr,
firstLine: firstLine{
client: false,
remoteAddr: st.RemoteAddr(),
},
}
trInfo.firstLine.client = false
trInfo.firstLine.remoteAddr = st.RemoteAddr()
if dl, ok := stream.Context().Deadline(); ok {
trInfo.firstLine.deadline = time.Until(dl)
}
@ -859,7 +887,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
if trInfo != nil {
defer trInfo.tr.Finish()
trInfo.firstLine.client = false
trInfo.tr.LazyLog(&trInfo.firstLine, false)
defer func() {
if err != nil && err != io.EOF {
@ -1245,7 +1272,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
service := sm[:pos]
method := sm[pos+1:]
if srv, ok := s.m[service]; ok {
srv, knownService := s.m[service]
if knownService {
if md, ok := srv.md[method]; ok {
s.processUnaryRPC(t, stream, srv, md, trInfo)
return
@ -1260,11 +1288,16 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
var errDesc string
if !knownService {
errDesc = fmt.Sprintf("unknown service %v", service)
} else {
errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
}
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
trInfo.tr.LazyPrintf("%s", errDesc)
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)

View File

@ -99,6 +99,9 @@ type ServiceConfig struct {
// healthCheckConfig must be set as one of the requirement to enable LB channel
// health check.
healthCheckConfig *healthCheckConfig
// rawJSONString stores the service config JSON string that gets parsed into
// this service config struct.
rawJSONString string
}
// healthCheckConfig defines the go-native version of the LB channel health check config.
@ -238,24 +241,22 @@ type jsonSC struct {
HealthCheckConfig *healthCheckConfig
}
func parseServiceConfig(js string) (ServiceConfig, error) {
if len(js) == 0 {
return ServiceConfig{}, fmt.Errorf("no JSON service config provided")
}
func parseServiceConfig(js string) (*ServiceConfig, error) {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err
return nil, err
}
sc := ServiceConfig{
LB: rsc.LoadBalancingPolicy,
Methods: make(map[string]MethodConfig),
retryThrottling: rsc.RetryThrottling,
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
if rsc.MethodConfig == nil {
return sc, nil
return &sc, nil
}
for _, m := range *rsc.MethodConfig {
@ -265,7 +266,7 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
d, err := parseDuration(m.Timeout)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err
return nil, err
}
mc := MethodConfig{
@ -274,7 +275,7 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
}
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err
return nil, err
}
if m.MaxRequestMessageBytes != nil {
if *m.MaxRequestMessageBytes > int64(maxInt) {
@ -299,13 +300,13 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
if sc.retryThrottling != nil {
if sc.retryThrottling.MaxTokens <= 0 ||
sc.retryThrottling.MaxTokens >= 1000 ||
sc.retryThrottling.MaxTokens > 1000 ||
sc.retryThrottling.TokenRatio <= 0 {
// Illegal throttling config; disable throttling.
sc.retryThrottling = nil
}
}
return sc, nil
return &sc, nil
}
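For reference, a hedged example of a JSON string this parser would accept; the field shapes follow jsonSC and the per-method config fields used above, and all values are invented:
// Illustrative only. MaxTokens must be in (0, 1000] and TokenRatio > 0,
// or the throttling config is discarded (see the check above).
const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [{ "service": "foo.Bar" }],
    "timeout": "1.5s",
    "maxRequestMessageBytes": 4194304
  }],
  "retryThrottling": {
    "maxTokens": 10,
    "tokenRatio": 0.1
  }
}`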
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {

View File

@ -27,6 +27,8 @@ import (
"context"
"net"
"time"
"google.golang.org/grpc/metadata"
)
// RPCStats contains stats information about RPCs.
@ -172,6 +174,9 @@ type End struct {
BeginTime time.Time
// EndTime is the time when the RPC ends.
EndTime time.Time
// Trailer contains the trailer metadata received from the server. This
// field is only valid if this End is from the client side.
Trailer metadata.MD
// Error is the error the RPC ended with. It is an error generated from
// status.Status and can be converted back to status.Status using
// status.FromError if non-nil.

View File

@ -33,6 +33,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
@ -230,17 +231,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if c.creds != nil {
callHdr.Creds = c.creds
}
var trInfo traceInfo
var trInfo *traceInfo
if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
trInfo.firstLine.client = true
trInfo = &traceInfo{
tr: trace.New("grpc.Sent."+methodFamily(method), method),
firstLine: firstLine{
client: true,
},
}
if deadline, ok := ctx.Deadline(); ok {
trInfo.firstLine.deadline = time.Until(deadline)
}
trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr)
}
ctx = newContextWithRPCInfo(ctx, c.failFast)
ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
sh := cc.dopts.copts.StatsHandler
var beginTime time.Time
if sh != nil {
@ -323,7 +328,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cs, nil
}
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
cs.attempt = &csAttempt{
cs: cs,
dc: cs.cc.dopts.dc,
@ -338,6 +343,9 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) err
if err != nil {
return err
}
if trInfo != nil {
trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
}
cs.attempt.t = t
cs.attempt.done = done
return nil
@ -414,9 +422,10 @@ type csAttempt struct {
decompSet bool
mu sync.Mutex // guards trInfo.tr
// trInfo may be nil (if EnableTracing is false).
// trInfo.tr is set when created (if EnableTracing is true),
// and cleared when the finish method is called.
trInfo traceInfo
trInfo *traceInfo
statsHandler stats.Handler
}
@ -540,7 +549,7 @@ func (cs *clientStream) retryLocked(lastErr error) error {
cs.commitAttemptLocked()
return err
}
if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
if err := cs.newAttemptLocked(nil, nil); err != nil {
return err
}
if lastErr = cs.replayBufferLocked(); lastErr == nil {
@ -668,15 +677,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if !cs.desc.ClientStreams {
cs.sentLast = true
}
data, err := encode(cs.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
if err != nil {
return err
}
compData, err := compress(data, cs.cp, cs.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
@ -811,7 +818,7 @@ func (cs *clientStream) finish(err error) {
func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
cs := a.cs
if EnableTracing {
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
@ -868,7 +875,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
}
return toRPCErr(err)
}
if EnableTracing {
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
@ -881,8 +888,9 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes),
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
})
}
if channelz.IsOn() {
@ -915,22 +923,23 @@ func (a *csAttempt) finish(err error) {
// Ending a stream with EOF indicates a success.
err = nil
}
var tr metadata.MD
if a.s != nil {
a.t.CloseStream(a.s, err)
tr = a.s.Trailer()
}
if a.done != nil {
br := false
var tr metadata.MD
if a.s != nil {
br = a.s.BytesReceived()
tr = a.s.Trailer()
}
a.done(balancer.DoneInfo{
Err: err,
Trailer: tr,
BytesSent: a.s != nil,
BytesReceived: br,
ServerLoad: balancerload.Parse(tr),
})
}
if a.statsHandler != nil {
@ -938,11 +947,12 @@ func (a *csAttempt) finish(err error) {
Client: true,
BeginTime: a.cs.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
}
a.statsHandler.HandleRPC(a.cs.ctx, end)
}
if a.trInfo.tr != nil {
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
a.trInfo.tr.LazyPrintf("RPC: [OK]")
} else {
@ -1138,15 +1148,13 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
if !as.desc.ClientStreams {
as.sentLast = true
}
data, err := encode(as.codec, m)
// load hdr, payload, data
hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
if err != nil {
return err
}
compData, err := compress(data, as.cp, as.comp)
if err != nil {
return err
}
hdr, payld := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payld) > *as.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
@ -1383,15 +1391,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
ss.t.IncrMsgSent()
}
}()
data, err := encode(ss.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil {
return err
}
compData, err := compress(data, ss.cp, ss.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > ss.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
@ -1466,8 +1472,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes),
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
})
}
if ss.binlog != nil {
@ -1483,3 +1490,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
// prepareMsg returns the hdr, payload and data, either by marshalling and
// compressing m with the passed codec and compressors, or by reusing the
// bytes of a passed *PreparedMsg.
func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
}
// The input interface is not a prepared msg.
// Marshal and compress the data at this point.
data, err = encode(codec, m)
if err != nil {
return nil, nil, nil, err
}
compData, err := compress(data, cp, comp)
if err != nil {
return nil, nil, nil, err
}
hdr, payload = msgHeader(data, compData)
return hdr, payload, data, nil
}

View File

@ -24,6 +24,7 @@ import (
"io"
"net"
"strings"
"sync"
"time"
"golang.org/x/net/trace"
@ -53,13 +54,25 @@ type traceInfo struct {
}
// firstLine is the first line of an RPC trace.
// It may be mutated after construction; remoteAddr specifically may change
// during client-side use.
type firstLine struct {
mu sync.Mutex
client bool // whether this is a client (outgoing) RPC
remoteAddr net.Addr
deadline time.Duration // may be zero
}
func (f *firstLine) SetRemoteAddr(addr net.Addr) {
f.mu.Lock()
f.remoteAddr = addr
f.mu.Unlock()
}
func (f *firstLine) String() string {
f.mu.Lock()
defer f.mu.Unlock()
var line bytes.Buffer
io.WriteString(&line, "RPC: ")
if f.client {

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.19.1"
const Version = "1.21.0"