Fix vendor out-of-sync issue

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2019-07-18 17:43:24 +05:30
Committed by: mergify[bot]
Parent: 21a02fb559
Commit: e128caddc5
84 changed files with 2686 additions and 704 deletions

View File

@ -22,6 +22,7 @@ package balancer
import (
"context"
"encoding/json"
"errors"
"net"
"strings"
@ -31,6 +32,7 @@ import (
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
var (
@ -39,7 +41,10 @@ var (
)
// Register registers the balancer builder to the balancer map. b.Name
// (lowercased) will be used as the name registered with this builder.
// (lowercased) will be used as the name registered with this builder. If the
// Builder implements ConfigParser, ParseConfig will be called when new service
// configs are received by the resolver, and the result will be provided to the
// Balancer in UpdateClientConnState.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Balancers are
@ -172,6 +177,14 @@ type Builder interface {
Name() string
}
// ConfigParser parses load balancer configs.
type ConfigParser interface {
// ParseConfig parses the JSON load balancer config provided into an
// internal form or returns an error if the config is invalid. For future
// compatibility reasons, unknown fields in the config should be ignored.
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
}
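As a rough illustration of how the new ConfigParser hook fits with the existing Register contract above — not part of this commit, and with hypothetical names (weighted, weightedBuilder, weightedConfig) — a builder registered from init() can also implement ConfigParser so its JSON config arrives pre-parsed in UpdateClientConnState:

package weighted

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/serviceconfig"
)

// weightedConfig embeds serviceconfig.LoadBalancingConfig so the struct
// satisfies that interface; the embedded field is excluded from JSON.
type weightedConfig struct {
	serviceconfig.LoadBalancingConfig `json:"-"`
	Weight                            int `json:"weight"`
}

type weightedBuilder struct{}

func (weightedBuilder) Name() string { return "hypothetical_weighted" }

func (weightedBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return nil // a real builder would construct and return its Balancer here
}

// ParseConfig makes weightedBuilder a balancer.ConfigParser. Unknown JSON
// fields are ignored (encoding/json's default), per the comment above.
func (weightedBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	cfg := &weightedConfig{}
	if err := json.Unmarshal(js, cfg); err != nil {
		return nil, fmt.Errorf("invalid weighted config %q: %v", string(js), err)
	}
	return cfg, nil
}

func init() {
	// Per the Register NOTE above, registration happens at init time.
	balancer.Register(weightedBuilder{})
}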
// PickOptions contains additional information for the Pick operation.
type PickOptions struct {
// FullMethodName is the method name that NewClientStream() is called
@ -270,7 +283,7 @@ type Balancer interface {
// non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateResolverState will be called instead.
// UpdateClientConnState will be called instead.
HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
@ -283,14 +296,23 @@ type SubConnState struct {
// TODO: add last connection error
}
// ClientConnState describes the state of a ClientConn relevant to the
// balancer.
type ClientConnState struct {
ResolverState resolver.State
// The parsed load balancing configuration returned by the builder's
// ParseConfig method, if implemented.
BalancerConfig serviceconfig.LoadBalancingConfig
}
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateResolverState method will be called instead
// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
// HandleSubConnStateChange.
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateResolverState is called by gRPC when the state of the resolver
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes.
UpdateResolverState(resolver.State)
UpdateClientConnState(ClientConnState)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)

View File

@ -70,13 +70,13 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error)
panic("not implemented")
}
func (b *baseBalancer) UpdateResolverState(s resolver.State) {
// TODO: handle s.Err (log if not nil) once implemented.
// TODO: handle s.ServiceConfig?
grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
for _, a := range s.Addresses {
for _, a := range s.ResolverState.Addresses {
addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns).

View File

@ -88,7 +88,7 @@ type ccBalancerWrapper struct {
cc *ClientConn
balancer balancer.Balancer
stateChangeQueue *scStateUpdateBuffer
resolverUpdateCh chan *resolver.State
ccUpdateCh chan *balancer.ClientConnState
done chan struct{}
mu sync.Mutex
@ -99,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
ccb := &ccBalancerWrapper{
cc: cc,
stateChangeQueue: newSCStateUpdateBuffer(),
resolverUpdateCh: make(chan *resolver.State, 1),
ccUpdateCh: make(chan *balancer.ClientConnState, 1),
done: make(chan struct{}),
subConns: make(map[*acBalancerWrapper]struct{}),
}
@ -126,7 +126,7 @@ func (ccb *ccBalancerWrapper) watcher() {
} else {
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
}
case s := <-ccb.resolverUpdateCh:
case s := <-ccb.ccUpdateCh:
select {
case <-ccb.done:
ccb.balancer.Close()
@ -134,9 +134,9 @@ func (ccb *ccBalancerWrapper) watcher() {
default:
}
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateResolverState(*s)
ub.UpdateClientConnState(*s)
} else {
ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
}
case <-ccb.done:
}
@ -151,9 +151,11 @@ func (ccb *ccBalancerWrapper) watcher() {
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
ccb.UpdateBalancerState(connectivity.Connecting, nil)
return
default:
}
ccb.cc.firstResolveEvent.Fire()
}
}
@ -178,9 +180,10 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
})
}
func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
if ccb.cc.curBalancerName != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
s := ccs.ResolverState
for i := 0; i < len(s.Addresses); {
if s.Addresses[i].Type == resolver.GRPCLB {
copy(s.Addresses[i:], s.Addresses[i+1:])
@ -191,10 +194,10 @@ func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
}
}
select {
case <-ccb.resolverUpdateCh:
case <-ccb.ccUpdateCh:
default:
}
ccb.resolverUpdateCh <- &s
ccb.ccUpdateCh <- ccs
}
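The select/default drain followed by a send, as used above, is a small "latest wins" idiom: with a single producer and a 1-buffered channel, the watcher goroutine only ever sees the most recent ClientConnState. A standalone sketch of the idiom (hypothetical names, not from this commit):

package main

import "fmt"

// publishLatest assumes a single sender: it drops any stale value still
// sitting in the 1-buffered channel, then sends the new one without blocking.
func publishLatest(ch chan int, v int) {
	select {
	case <-ch: // discard the update the consumer never got to
	default:
	}
	ch <- v
}

func main() {
	updates := make(chan int, 1)
	publishLatest(updates, 1)
	publishLatest(updates, 2) // supersedes 1 before the consumer reads
	fmt.Println(<-updates)    // prints 2
}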
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {

View File

@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/status"
)
@ -532,24 +533,6 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
}
}
// gRPC should resort to default service config when:
// * resolver service config is disabled
// * or, resolver does not return a service config or returns an invalid one.
func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool {
if cc.dopts.disableServiceConfig {
return true
}
// The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type.
// Right now, we assume that empty service config string means resolver does not return a config.
if sc == "" {
return true
}
// TODO: the logic below is temporary. Once we finish the logic to validate service config
// in resolver, we will replace the logic below.
_, err := parseServiceConfig(sc)
return err != nil
}
func (cc *ClientConn) updateResolverState(s resolver.State) error {
cc.mu.Lock()
defer cc.mu.Unlock()
@ -560,44 +543,37 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error {
return nil
}
if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) {
if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
}
} else {
// TODO: the parsing logic below will be moved inside resolver.
sc, err := parseServiceConfig(s.ServiceConfig)
if err != nil {
return err
}
if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig {
cc.applyServiceConfig(sc)
}
}
// update the service config that will be sent to balancer.
if cc.sc != nil {
s.ServiceConfig = cc.sc.rawJSONString
} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
cc.applyServiceConfig(sc)
}
var balCfg serviceconfig.LoadBalancingConfig
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var isGRPCLB bool
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
var newBalancerName string
// TODO: use new loadBalancerConfig field with appropriate priority.
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
if cc.sc != nil && cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
balCfg = cc.sc.lbConfig.cfg
} else {
newBalancerName = PickFirstBalancerName
var isGRPCLB bool
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName
}
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
@ -607,8 +583,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error {
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
cc.balancerWrapper.updateResolverState(s)
cc.firstResolveEvent.Fire()
cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
return nil
}
@ -621,7 +596,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error {
//
// Caller must hold cc.mu.
func (cc *ClientConn) switchBalancer(name string) {
if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
if strings.EqualFold(cc.curBalancerName, name) {
return
}
@ -760,6 +735,8 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
// Update connectivity state within the lock to prevent subsequent or
// concurrent calls from resetting the transport more than once.
ac.updateConnectivityState(connectivity.Connecting)
ac.mu.Unlock()
@ -770,7 +747,16 @@ func (ac *addrConn) connect() error {
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
// It checks whether current connected address of ac is in the new addrs list.
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
@ -778,17 +764,18 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if ac.state == connectivity.Shutdown {
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
ac.addrs = addrs
return true
}
// Unless we're busy reconnecting already, let's reconnect from the top of
// the list.
if ac.state != connectivity.Ready {
if ac.state == connectivity.Connecting {
return false
}
// ac.state is Ready, try to find the connected address.
var curAddrFound bool
for _, a := range addrs {
if reflect.DeepEqual(ac.curAddr, a) {
@ -1037,6 +1024,9 @@ func (ac *addrConn) resetTransport() {
// The spec doesn't mention what should be done for multiple addresses.
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
connectDeadline := time.Now().Add(dialDuration)
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.mu.Unlock()
newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
@ -1079,27 +1069,8 @@ func (ac *addrConn) resetTransport() {
ac.transport = newTr
ac.backoffIdx = 0
healthCheckConfig := ac.cc.healthCheckConfig()
// LB channel health checking is only enabled when all the four requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
// 3. a service config with non-empty healthCheckConfig field is provided,
// 4. the current load balancer allows it.
hctx, hcancel := context.WithCancel(ac.ctx)
healthcheckManagingState := false
if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
if ac.cc.dopts.healthCheckFunc == nil {
// TODO: add a link to the health check doc in the error message.
grpclog.Error("the client side LB channel health check function has not been set.")
} else {
// TODO(deklerk) refactor to just return transport
go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
healthcheckManagingState = true
}
}
if !healthcheckManagingState {
ac.updateConnectivityState(connectivity.Ready)
}
ac.startHealthCheck(hctx)
ac.mu.Unlock()
// Block until the created transport is down. And when this happens,
@ -1133,8 +1104,6 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
ac.mu.Unlock()
return nil, resolver.Address{}, nil, errConnClosing
}
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@ -1225,42 +1194,83 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
return newTr, reconnect, nil
}
func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
// Set up the health check helper functions
newStream := func() (interface{}, error) {
return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr)
// startHealthCheck starts the health checking stream (RPC) to watch the health
// stats of this connection if health checking is requested and configured.
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
// It sets addrConn to READY if the health checking stream is not started.
//
// Caller must hold ac.mu.
func (ac *addrConn) startHealthCheck(ctx context.Context) {
var healthcheckManagingState bool
defer func() {
if !healthcheckManagingState {
ac.updateConnectivityState(connectivity.Ready)
}
}()
if ac.cc.dopts.disableHealthCheck {
return
}
firstReady := true
reportHealth := func(ok bool) {
healthCheckConfig := ac.cc.healthCheckConfig()
if healthCheckConfig == nil {
return
}
if !ac.scopts.HealthCheckEnabled {
return
}
healthCheckFunc := ac.cc.dopts.healthCheckFunc
if healthCheckFunc == nil {
// The health package is not imported, so the health check function is not set.
//
// TODO: add a link to the health check doc in the error message.
grpclog.Error("Health check is requested but health check function is not set.")
return
}
healthcheckManagingState = true
// Set up the health check helper functions.
currentTr := ac.transport
newStream := func(method string) (interface{}, error) {
ac.mu.Lock()
if ac.transport != currentTr {
ac.mu.Unlock()
return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
}
ac.mu.Unlock()
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
}
setConnectivityState := func(s connectivity.State) {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.transport != newTr {
if ac.transport != currentTr {
return
}
if ok {
if firstReady {
firstReady = false
ac.curAddr = addr
}
ac.updateConnectivityState(connectivity.Ready)
} else {
ac.updateConnectivityState(connectivity.TransientFailure)
}
ac.updateConnectivityState(s)
}
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
Severity: channelz.CtError,
})
// Start the health checking stream.
go func() {
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
Severity: channelz.CtError,
})
}
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
}
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
}
}
}()
}
func (ac *addrConn) resetConnectBackoff() {

View File

@ -278,24 +278,22 @@ type ChannelzSecurityValue interface {
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
LocalCertificate []byte
RemoteCertificate []byte
}
func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should be avoided.
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
Value proto.Message
}
func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",

View File

@ -23,6 +23,8 @@ package internal
import (
"context"
"time"
"google.golang.org/grpc/connectivity"
)
var (
@ -37,10 +39,25 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfig is a function to parse JSON service configs into
// opaque data structures.
ParseServiceConfig func(sc string) (interface{}, error)
// StatusRawProto is exported by status/status.go. This func returns a
// pointer to the wrapped Status proto for a given status.Status without a
// call to proto.Clone(). The returned Status proto should not be mutated by
// the caller.
StatusRawProto interface{} // func (*status.Status) *spb.Status
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
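A hedged sketch of a function satisfying this signature is shown below. The real implementation lives in the gRPC health-checking client package and additionally retries with backoff and special-cases codes.Unimplemented, so this only approximates the shape rather than reproducing that code:

package healthcheck

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// watchHealth matches the HealthChecker signature above: it opens the Watch
// stream via newStream and reports the service's health through
// setConnectivityState until the stream breaks.
func watchHealth(ctx context.Context, newStream func(string) (interface{}, error),
	setConnectivityState func(connectivity.State), service string) error {
	rawS, err := newStream("/grpc.health.v1.Health/Watch")
	if err != nil {
		return err
	}
	s, ok := rawS.(grpc.ClientStream)
	if !ok {
		return fmt.Errorf("newStream returned %T, want grpc.ClientStream", rawS)
	}
	if err := s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil {
		return err
	}
	s.CloseSend()
	resp := new(healthpb.HealthCheckResponse)
	for {
		if err := s.RecvMsg(resp); err != nil {
			setConnectivityState(connectivity.TransientFailure)
			return err
		}
		if resp.Status == healthpb.HealthCheckResponse_SERVING {
			setConnectivityState(connectivity.Ready)
		} else {
			setConnectivityState(connectivity.TransientFailure)
		}
	}
}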
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.

View File

@ -24,6 +24,7 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -347,7 +348,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ht.stats.HandleRPC(s.ctx, inHeader)
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
windowHandler: func(int) {},
}
@ -361,7 +362,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
for buf := make([]byte, readSize); ; {
n, err := req.Body.Read(buf)
if n > 0 {
s.buf.put(recvMsg{data: buf[:n:n]})
s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
buf = buf[n:]
}
if err != nil {

View File

@ -117,6 +117,8 @@ type http2Client struct {
onGoAway func(GoAwayReason)
onClose func()
bufferPool *bufferPool
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@ -249,6 +251,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
onGoAway: onGoAway,
onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
@ -367,6 +370,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -437,6 +441,15 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
for _, vv := range added {
for i, v := range vv {
if i%2 == 0 {
@ -450,15 +463,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
}
}
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
for k, vv := range *md {
@ -946,9 +950,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
data := make([]byte, len(f.Data()))
copy(data, f.Data())
s.write(recvMsg{data: data})
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
}
}
// The server has closed the stream without sending trailers. Record that

View File

@ -35,9 +35,11 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
@ -55,6 +57,9 @@ var (
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
// statusRawProto is a function to get to the raw status proto wrapped in a
// status.Status without a proto.Clone().
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
)
// http2Server implements the ServerTransport interface with HTTP2.
@ -119,6 +124,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czData *channelzData
bufferPool *bufferPool
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -220,6 +226,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
kep: kep,
initialWindowSize: iwz,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow {
@ -405,9 +412,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -591,9 +599,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
data := make([]byte, len(f.Data()))
copy(data, f.Data())
s.write(recvMsg{data: data})
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
}
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
@ -817,7 +826,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
@ -1019,13 +1028,7 @@ func (t *http2Server) Close() error {
}
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
oldState = s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return oldState
}
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
@ -1047,15 +1050,13 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState stream
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
return oldState
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
oldState := t.deleteStream(s, eosReceived)
// If the stream is already closed, then don't put trailing header to controlbuf.
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return
}
@ -1063,14 +1064,18 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {},
onWrite: func() {
t.deleteStream(s, eosReceived)
},
}
t.controlBuf.put(hdr)
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: rst,

View File

@ -22,6 +22,7 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -39,10 +40,32 @@ import (
"google.golang.org/grpc/tap"
)
type bufferPool struct {
pool sync.Pool
}
func newBufferPool() *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
}
}
func (p *bufferPool) get() *bytes.Buffer {
return p.pool.Get().(*bytes.Buffer)
}
func (p *bufferPool) put(b *bytes.Buffer) {
p.pool.Put(b)
}
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
data []byte
buffer *bytes.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@ -117,8 +140,9 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
last *bytes.Buffer // Stores the remaining data in the previous calls.
err error
freeBuffer func(*bytes.Buffer)
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
@ -128,10 +152,13 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil && len(r.last) > 0 {
if r.last != nil {
// Read remaining data left in last call.
copied := copy(p, r.last)
r.last = r.last[copied:]
copied, _ := r.last.Read(p)
if r.last.Len() == 0 {
r.freeBuffer(r.last)
r.last = nil
}
return copied, nil
}
if r.closeStream != nil {
@ -170,8 +197,13 @@ func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
copied, _ := m.buffer.Read(p)
if m.buffer.Len() == 0 {
r.freeBuffer(m.buffer)
r.last = nil
} else {
r.last = m.buffer
}
return copied, nil
}
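The intended lifecycle of these pooled buffers — get, Reset, fill with the frame payload, drain, then hand back via freeBuffer/put, exactly as handleData and readAdditional do above — can be exercised in isolation. The pool code in this sketch simply mirrors the bufferPool added in this file:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufferPool mirrors the type added in this commit: a sync.Pool of
// *bytes.Buffer reused across received data frames.
type bufferPool struct{ pool sync.Pool }

func newBufferPool() *bufferPool {
	return &bufferPool{pool: sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}}
}

func (p *bufferPool) get() *bytes.Buffer  { return p.pool.Get().(*bytes.Buffer) }
func (p *bufferPool) put(b *bytes.Buffer) { p.pool.Put(b) }

func main() {
	pool := newBufferPool()

	frame := []byte("payload of an incoming DATA frame")
	buf := pool.get()
	buf.Reset()      // a recycled buffer may still contain old bytes
	buf.Write(frame) // copy the frame data, as handleData does

	p := make([]byte, 8)
	for buf.Len() > 0 { // drain in small reads, like recvBufferReader.Read
		n, _ := buf.Read(p)
		fmt.Printf("read %d bytes: %q\n", n, p[:n])
	}
	pool.put(buf) // equivalent to freeBuffer once the buffer is empty
}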

View File

@ -17,9 +17,8 @@
*/
// Package naming defines the naming API and related data structures for gRPC.
// The interface is EXPERIMENTAL and may be subject to change.
//
// Deprecated: please use package resolver.
// This package is deprecated: please use package resolver instead.
package naming
// Operation defines the corresponding operations for a name resolution change.

View File

@ -20,6 +20,10 @@
// All APIs in this package are experimental.
package resolver
import (
"google.golang.org/grpc/serviceconfig"
)
var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
@ -100,11 +104,12 @@ type BuildOption struct {
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
Addresses []Address // Resolved addresses for the target
ServiceConfig string // JSON representation of the service config
Addresses []Address // Resolved addresses for the target
// ServiceConfig is the parsed service config; obtained from
// serviceconfig.Parse.
ServiceConfig serviceconfig.Config
// TODO: add Err error
// TODO: add ParsedServiceConfig interface{}
}
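For a resolver implementation, the practical consequence of this change is that the service config JSON is parsed up front via serviceconfig.Parse rather than passed along as a raw string. A small hedged sketch, with a hypothetical pushUpdate helper:

package myresolver

import (
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/serviceconfig"
)

// pushUpdate sends resolved addresses plus an already-parsed service config
// to the ClientConn. serviceconfig.Parse requires the grpc package to be
// linked in, which is always true for a resolver used by a gRPC client.
func pushUpdate(cc resolver.ClientConn, addrs []resolver.Address, scJSON string) {
	state := resolver.State{Addresses: addrs}
	if scJSON != "" {
		if cfg, err := serviceconfig.Parse(scJSON); err == nil {
			state.ServiceConfig = cfg
		}
	}
	cc.UpdateState(state)
}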
// ClientConn contains the callbacks for resolver to notify any updates

View File

@ -138,19 +138,22 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
return
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
c, err := parseServiceConfig(sc)
if err != nil {
return
}
ccr.curState.ServiceConfig = sc
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
}
ccr.curState.ServiceConfig = c
ccr.cc.updateResolverState(ccr.curState)
}
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
return
}
var updates []string
if s.ServiceConfig != ccr.curState.ServiceConfig {
oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
newSC, newOK := s.ServiceConfig.(*ServiceConfig)
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
updates = append(updates, "service config updated")
}
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {

View File

@ -90,7 +90,7 @@ type Server struct {
mu sync.Mutex // guards following
lis map[net.Listener]bool
conns map[io.Closer]bool
conns map[transport.ServerTransport]bool
serve bool
drain bool
cv *sync.Cond // signaled when connections close for GracefulStop
@ -386,7 +386,7 @@ func NewServer(opt ...ServerOption) *Server {
s := &Server{
lis: make(map[net.Listener]bool),
opts: opts,
conns: make(map[io.Closer]bool),
conns: make(map[transport.ServerTransport]bool),
m: make(map[string]*service),
quit: make(chan struct{}),
done: make(chan struct{}),
@ -786,27 +786,27 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
return trInfo
}
func (s *Server) addConn(c io.Closer) bool {
func (s *Server) addConn(st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
c.Close()
st.Close()
return false
}
if s.drain {
// Transport added after we drained our existing conns: drain it
// immediately.
c.(transport.ServerTransport).Drain()
st.Drain()
}
s.conns[c] = true
s.conns[st] = true
return true
}
func (s *Server) removeConn(c io.Closer) {
func (s *Server) removeConn(st transport.ServerTransport) {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns != nil {
delete(s.conns, c)
delete(s.conns, st)
s.cv.Broadcast()
}
}
@ -1423,8 +1423,8 @@ func (s *Server) GracefulStop() {
}
s.lis = nil
if !s.drain {
for c := range s.conns {
c.(transport.ServerTransport).Drain()
for st := range s.conns {
st.Drain()
}
s.drain = true
}

View File

@ -25,8 +25,11 @@ import (
"strings"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
)
const maxInt = int(^uint(0) >> 1)
@ -61,6 +64,11 @@ type MethodConfig struct {
retryPolicy *retryPolicy
}
type lbConfig struct {
name string
cfg serviceconfig.LoadBalancingConfig
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
//
@ -68,10 +76,18 @@ type MethodConfig struct {
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type ServiceConfig struct {
// LB is the load balancer the service providers recommends. The balancer specified
// via grpc.WithBalancer will override this.
serviceconfig.Config
// LB is the load balancer the service providers recommends. The balancer
// specified via grpc.WithBalancer will override this. This is deprecated;
// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
// will be used.
LB *string
// lbConfig is the service config's load balancing configuration. If
// lbConfig and LB are both present, lbConfig will be used.
lbConfig *lbConfig
// Methods contains a map for the methods in this service. If there is an
// exact match for a method (i.e. /service/method) in the map, use the
// corresponding MethodConfig. If there's no exact match, look for the
@ -233,15 +249,27 @@ type jsonMC struct {
RetryPolicy *jsonRetryPolicy
}
type loadBalancingConfig map[string]json.RawMessage
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
LoadBalancingConfig *[]loadBalancingConfig
MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig
}
func init() {
internal.ParseServiceConfig = func(sc string) (interface{}, error) {
return parseServiceConfig(sc)
}
}
func parseServiceConfig(js string) (*ServiceConfig, error) {
if len(js) == 0 {
return nil, fmt.Errorf("no JSON service config provided")
}
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
@ -255,10 +283,38 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
if rsc.LoadBalancingConfig != nil {
for i, lbcfg := range *rsc.LoadBalancingConfig {
if len(lbcfg) != 1 {
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
grpclog.Warningf(err.Error())
return nil, err
}
var name string
var jsonCfg json.RawMessage
for name, jsonCfg = range lbcfg {
}
builder := balancer.Get(name)
if builder == nil {
continue
}
sc.lbConfig = &lbConfig{name: name}
if parser, ok := builder.(balancer.ConfigParser); ok {
var err error
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
if err != nil {
return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
}
} else if string(jsonCfg) != "{}" {
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
break
}
}
if rsc.MethodConfig == nil {
return &sc, nil
}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
@ -299,11 +355,11 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
}
if sc.retryThrottling != nil {
if sc.retryThrottling.MaxTokens <= 0 ||
sc.retryThrottling.MaxTokens > 1000 ||
sc.retryThrottling.TokenRatio <= 0 {
// Illegal throttling config; disable throttling.
sc.retryThrottling = nil
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
}
if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
}
}
return &sc, nil

View File

@ -0,0 +1,48 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// This package is EXPERIMENTAL.
package serviceconfig
import (
"google.golang.org/grpc/internal"
)
// Config represents an opaque data structure holding a service config.
type Config interface {
isConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
// balancer config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
// Parse parses the JSON service config provided into an internal form or
// returns an error if the config is invalid.
func Parse(ServiceConfigJSON string) (Config, error) {
c, err := internal.ParseServiceConfig(ServiceConfigJSON)
if err != nil {
return nil, err
}
return c.(Config), err
}
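A short usage sketch (not part of the commit): Parse delegates to the grpc package's parseServiceConfig through internal.ParseServiceConfig, so the grpc package must be linked in, and the loadBalancingConfig list consists of single-entry policy-name-to-config objects; entries naming an unregistered policy are skipped by the loop shown earlier in service_config.go.

package main

import (
	"fmt"

	_ "google.golang.org/grpc"                     // installs internal.ParseServiceConfig via init()
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the round_robin balancer
	"google.golang.org/grpc/serviceconfig"
)

func main() {
	js := `{
	  "loadBalancingConfig": [ {"round_robin": {}} ],
	  "methodConfig": [{
	    "name": [{"service": "foo", "method": "Bar"}],
	    "waitForReady": true
	  }]
	}`
	cfg, err := serviceconfig.Parse(js)
	if err != nil {
		fmt.Println("invalid service config:", err)
		return
	}
	// The concrete type behind the opaque Config is the grpc package's
	// *ServiceConfig, as returned by parseServiceConfig above.
	fmt.Printf("parsed service config: %T\n", cfg)
}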

View File

@ -36,8 +36,15 @@ import (
"github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal"
)
func init() {
internal.StatusRawProto = statusRawProto
}
func statusRawProto(s *Status) *spb.Status { return s.s }
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status

View File

@ -30,7 +30,6 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancerload"
@ -965,19 +964,18 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
ac.mu.Lock()
if ac.transport != t {
ac.mu.Unlock()
return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
}
// transition to CONNECTING state when an attempt starts
if ac.state != connectivity.Connecting {
ac.updateConnectivityState(connectivity.Connecting)
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
ac.mu.Unlock()
// newClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
// - no service config (or wait for service config)
// - no tracing or stats
func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
if t == nil {
// TODO: return RPC error here?
return nil, errors.New("transport provided is nil")
@ -985,14 +983,6 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
// defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
c := &callInfo{}
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
// Possible context leak:
// The cancel function for the child context we create will only be called
// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
@ -1005,6 +995,13 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
}
}()
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
if err := setCallInfoCodec(c); err != nil {
return nil, err
}
@ -1037,6 +1034,7 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
callHdr.Creds = c.creds
}
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{
callHdr: callHdr,
ac: ac,

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.21.1"
const Version = "1.22.0"