Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

Commit: Fresh dep ensure
190  vendor/google.golang.org/grpc/test/balancer_test.go  (generated, vendored, new file)
@@ -0,0 +1,190 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package test

import (
    "reflect"
    "testing"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/leakcheck"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/resolver"
    testpb "google.golang.org/grpc/test/grpc_testing"
    "google.golang.org/grpc/testdata"
)

const testBalancerName = "testbalancer"

// testBalancer creates one subconn with the first address from resolved
// addresses.
//
// It's used to test that options for NewSubConn are applied correctly.
type testBalancer struct {
    cc balancer.ClientConn
    sc balancer.SubConn

    newSubConnOptions balancer.NewSubConnOptions
    pickOptions       []balancer.PickOptions
    doneInfo          []balancer.DoneInfo
}

func (b *testBalancer) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
    b.cc = cc
    return b
}

func (*testBalancer) Name() string {
    return testBalancerName
}

func (b *testBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
    // Only create a subconn the first time.
    if err == nil && b.sc == nil {
        b.sc, err = b.cc.NewSubConn(addrs, b.newSubConnOptions)
        if err != nil {
            grpclog.Errorf("testBalancer: failed to NewSubConn: %v", err)
            return
        }
        b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc, bal: b})
        b.sc.Connect()
    }
}

func (b *testBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
    grpclog.Infof("testBalancer: HandleSubConnStateChange: %p, %v", sc, s)
    if b.sc != sc {
        grpclog.Infof("testBalancer: ignored state change because sc is not recognized")
        return
    }
    if s == connectivity.Shutdown {
        b.sc = nil
        return
    }

    switch s {
    case connectivity.Ready, connectivity.Idle:
        b.cc.UpdateBalancerState(s, &picker{sc: sc, bal: b})
    case connectivity.Connecting:
        b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable, bal: b})
    case connectivity.TransientFailure:
        b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure, bal: b})
    }
}

func (b *testBalancer) Close() {
}

type picker struct {
    err error
    sc  balancer.SubConn
    bal *testBalancer
}

func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    p.bal.pickOptions = append(p.bal.pickOptions, opts)
    if p.err != nil {
        return nil, nil, p.err
    }
    return p.sc, func(d balancer.DoneInfo) { p.bal.doneInfo = append(p.bal.doneInfo, d) }, nil
}

func TestCredsBundleFromBalancer(t *testing.T) {
    balancer.Register(&testBalancer{
        newSubConnOptions: balancer.NewSubConnOptions{
            CredsBundle: &testCredsBundle{},
        },
    })
    defer leakcheck.Check(t)
    te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: ""})
    te.tapHandle = authHandle
    te.customDialOptions = []grpc.DialOption{
        grpc.WithBalancerName(testBalancerName),
    }
    creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key"))
    if err != nil {
        t.Fatalf("Failed to generate credentials %v", err)
    }
    te.customServerOptions = []grpc.ServerOption{
        grpc.Creds(creds),
    }
    te.startServer(&testServer{})
    defer te.tearDown()

    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)
    if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
        t.Fatalf("Test failed. Reason: %v", err)
    }
}

func TestPickAndDone(t *testing.T) {
    defer leakcheck.Check(t)
    for _, e := range listTestEnv() {
        testPickAndDone(t, e)
    }
}

func testPickAndDone(t *testing.T, e env) {
    te := newTest(t, e)
    b := &testBalancer{}
    balancer.Register(b)
    te.customDialOptions = []grpc.DialOption{
        grpc.WithBalancerName(testBalancerName),
    }
    te.userAgent = failAppUA
    te.startServer(&testServer{security: e.security})
    defer te.tearDown()

    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    wantErr := detailedError
    if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(err, wantErr) {
        t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
    }
    md := metadata.Pairs("testMDKey", "testMDVal")
    ctx = metadata.NewOutgoingContext(ctx, md)
    if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
        t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
    }

    poWant := []balancer.PickOptions{
        {FullMethodName: "/grpc.testing.TestService/EmptyCall"},
        {FullMethodName: "/grpc.testing.TestService/UnaryCall", Header: md},
    }
    if !reflect.DeepEqual(b.pickOptions, poWant) {
        t.Fatalf("b.pickOptions = %v; want %v", b.pickOptions, poWant)
    }

    if len(b.doneInfo) < 1 || !reflect.DeepEqual(b.doneInfo[0].Err, wantErr) {
        t.Fatalf("b.doneInfo = %v; want b.doneInfo[0].Err = %v", b.doneInfo, wantErr)
    }
    if len(b.doneInfo) < 2 || !reflect.DeepEqual(b.doneInfo[1].Trailer, testTrailerMetadata) {
        t.Fatalf("b.doneInfo = %v; want b.doneInfo[1].Trailer = %v", b.doneInfo, testTrailerMetadata)
    }
}
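The balancer/picker pair above is the heart of this test: Pick chooses a SubConn for each RPC and returns a done callback that gRPC invokes with the RPC's outcome, which is how testPickAndDone collects its DoneInfo slice. As a point of comparison only, here is a minimal, hypothetical round-robin picker against the same (since-deprecated) balancer API; rrPicker and its subConns slice are illustrative and not part of the diff:

package rr

import (
    "sync"

    "golang.org/x/net/context"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/grpclog"
)

// rrPicker cycles through the SubConns a surrounding balancer is assumed to
// have created and kept current (population not shown).
type rrPicker struct {
    mu       sync.Mutex
    subConns []balancer.SubConn
    next     int
}

func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if len(p.subConns) == 0 {
        // Tells gRPC to queue the RPC until a SubConn becomes available.
        return nil, nil, balancer.ErrNoSubConnAvailable
    }
    sc := p.subConns[p.next]
    p.next = (p.next + 1) % len(p.subConns)
    // The second return value is the "done" callback: gRPC invokes it when the
    // picked RPC finishes, carrying the error and trailers in DoneInfo.
    return sc, func(d balancer.DoneInfo) { grpclog.Infof("rpc finished: err=%v", d.Err) }, nil
}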
100  vendor/google.golang.org/grpc/test/channelz_linux_go110_test.go  (generated, vendored, new file)
@@ -0,0 +1,100 @@
// +build go1.10,linux,!appengine

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// The test in this file should be run in an environment that has go1.10 or
// later, as the function SyscallConn() (required to get socket options) was
// introduced to net.TCPListener in go1.10.

package test

import (
    "testing"
    "time"

    "google.golang.org/grpc/internal/channelz"
    "google.golang.org/grpc/internal/leakcheck"
    testpb "google.golang.org/grpc/test/grpc_testing"
)

func TestCZSocketMetricsSocketOption(t *testing.T) {
    envs := []env{tcpClearRREnv, tcpTLSRREnv}
    for _, e := range envs {
        testCZSocketMetricsSocketOption(t, e)
    }
}

func testCZSocketMetricsSocketOption(t *testing.T, e env) {
    defer leakcheck.Check(t)
    channelz.NewChannelzStorage()
    te := newTest(t, e)
    te.startServer(&testServer{security: e.security})
    defer te.tearDown()
    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)
    doSuccessfulUnaryCall(tc, t)

    time.Sleep(10 * time.Millisecond)
    ss, _ := channelz.GetServers(0)
    if len(ss) != 1 {
        t.Fatalf("There should be one server, not %d", len(ss))
    }
    if len(ss[0].ListenSockets) != 1 {
        t.Fatalf("There should be one listen socket, not %d", len(ss[0].ListenSockets))
    }
    for id := range ss[0].ListenSockets {
        sm := channelz.GetSocket(id)
        if sm == nil || sm.SocketData == nil || sm.SocketData.SocketOptions == nil {
            t.Fatalf("Unable to get server listen socket options")
        }
    }
    ns, _ := channelz.GetServerSockets(ss[0].ID, 0)
    if len(ns) != 1 {
        t.Fatalf("There should be one server normal socket, not %d", len(ns))
    }
    if ns[0] == nil || ns[0].SocketData == nil || ns[0].SocketData.SocketOptions == nil {
        t.Fatalf("Unable to get server normal socket options")
    }

    tchan, _ := channelz.GetTopChannels(0)
    if len(tchan) != 1 {
        t.Fatalf("There should only be one top channel, not %d", len(tchan))
    }
    if len(tchan[0].SubChans) != 1 {
        t.Fatalf("There should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
    }
    var id int64
    for id = range tchan[0].SubChans {
        break
    }
    sc := channelz.GetSubChannel(id)
    if sc == nil {
        t.Fatalf("There should only be one socket under subchannel %d, not 0", id)
    }
    if len(sc.Sockets) != 1 {
        t.Fatalf("There should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
    }
    for id = range sc.Sockets {
        break
    }
    skt := channelz.GetSocket(id)
    if skt == nil || skt.SocketData == nil || skt.SocketData.SocketOptions == nil {
        t.Fatalf("Unable to get client normal socket options")
    }
}
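The build tag above exists because channelz reads socket options through the raw file descriptor that SyscallConn exposes. A minimal, self-contained sketch of the go1.10+ facility involved, assuming Linux and using an illustrative SO_RCVBUF lookup rather than channelz's actual option set:

package main

import (
    "fmt"
    "net"
    "syscall"
)

func main() {
    lis, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        panic(err)
    }
    defer lis.Close()
    // SyscallConn on net.TCPListener only exists in go1.10 and later, which
    // is exactly what the build tag in the test file guards against.
    raw, err := lis.(*net.TCPListener).SyscallConn()
    if err != nil {
        panic(err)
    }
    raw.Control(func(fd uintptr) {
        // Read one socket option from the listener's fd as a demonstration.
        v, _ := syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
        fmt.Println("SO_RCVBUF:", v)
    })
}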
780  vendor/google.golang.org/grpc/test/channelz_test.go  (generated, vendored)
(file diff suppressed because it is too large)
131  vendor/google.golang.org/grpc/test/creds_test.go  (generated, vendored, new file)
@@ -0,0 +1,131 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package test

// TODO(https://github.com/grpc/grpc-go/issues/2330): move all creds related
// tests to this file.

import (
    "testing"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/internal/leakcheck"
    testpb "google.golang.org/grpc/test/grpc_testing"
    "google.golang.org/grpc/testdata"
)

const (
    bundlePerRPCOnly = "perRPCOnly"
    bundleTLSOnly    = "tlsOnly"
)

type testCredsBundle struct {
    t    *testing.T
    mode string
}

func (c *testCredsBundle) TransportCredentials() credentials.TransportCredentials {
    if c.mode == bundlePerRPCOnly {
        return nil
    }

    creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
    if err != nil {
        c.t.Logf("Failed to load credentials: %v", err)
        return nil
    }
    return creds
}

func (c *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials {
    if c.mode == bundleTLSOnly {
        return nil
    }
    return testPerRPCCredentials{}
}

func (c *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) {
    return &testCredsBundle{mode: mode}, nil
}

func TestCredsBundleBoth(t *testing.T) {
    defer leakcheck.Check(t)
    te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
    te.tapHandle = authHandle
    te.customDialOptions = []grpc.DialOption{
        grpc.WithCredentialsBundle(&testCredsBundle{t: t}),
    }
    creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key"))
    if err != nil {
        t.Fatalf("Failed to generate credentials %v", err)
    }
    te.customServerOptions = []grpc.ServerOption{
        grpc.Creds(creds),
    }
    te.startServer(&testServer{})
    defer te.tearDown()

    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)
    if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
        t.Fatalf("Test failed. Reason: %v", err)
    }
}

func TestCredsBundleTransportCredentials(t *testing.T) {
    defer leakcheck.Check(t)
    te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
    te.customDialOptions = []grpc.DialOption{
        grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundleTLSOnly}),
    }
    creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key"))
    if err != nil {
        t.Fatalf("Failed to generate credentials %v", err)
    }
    te.customServerOptions = []grpc.ServerOption{
        grpc.Creds(creds),
    }
    te.startServer(&testServer{})
    defer te.tearDown()

    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)
    if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
        t.Fatalf("Test failed. Reason: %v", err)
    }
}

func TestCredsBundlePerRPCCredentials(t *testing.T) {
    defer leakcheck.Check(t)
    te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"})
    te.tapHandle = authHandle
    te.customDialOptions = []grpc.DialOption{
        grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundlePerRPCOnly}),
    }
    te.startServer(&testServer{})
    defer te.tearDown()

    cc := te.clientConn()
    tc := testpb.NewTestServiceClient(cc)
    if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
        t.Fatalf("Test failed. Reason: %v", err)
    }
}
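NewWithMode is the one part of the Bundle interface the tests above never call directly; it lets a consumer derive a single-credential-type view of an existing bundle. A hypothetical sketch of such a consumer follows; the helper name and the sanity check are illustrative, while the "perRPCOnly" string matches the bundlePerRPCOnly constant above:

package creds

import (
    "fmt"

    "google.golang.org/grpc/credentials"
)

// perRPCOnly derives a per-RPC-credentials-only view of an existing bundle,
// then verifies the derived bundle really dropped its transport credentials.
func perRPCOnly(b credentials.Bundle) (credentials.Bundle, error) {
    nb, err := b.NewWithMode("perRPCOnly")
    if err != nil {
        return nil, err
    }
    if nb.TransportCredentials() != nil {
        return nil, fmt.Errorf("mode %q should carry no transport credentials", "perRPCOnly")
    }
    return nb, nil
}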
815  vendor/google.golang.org/grpc/test/end2end_test.go  (generated, vendored)
@@ -30,6 +30,7 @@ import (
 	"io"
 	"math"
 	"net"
+	"net/http"
 	"os"
 	"reflect"
 	"runtime"
@@ -55,8 +56,10 @@ import (
 	"google.golang.org/grpc/health"
 	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
-	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/leakcheck"
+	"google.golang.org/grpc/internal/testutils"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -71,7 +74,7 @@ import (
 )

 func init() {
-	grpc.RegisterChannelz()
+	channelz.TurnOn()
 }

 var (
@@ -452,6 +455,8 @@ type test struct {
 	maxClientSendMsgSize    *int
 	maxServerReceiveMsgSize *int
 	maxServerSendMsgSize    *int
+	maxClientHeaderListSize *uint32
+	maxServerHeaderListSize *uint32
 	userAgent string
 	// clientCompression and serverCompression are set to test the deprecated API
 	// WithCompressor and WithDecompressor.
@@ -474,6 +479,7 @@ type test struct {
 	clientInitialConnWindowSize int32
 	perRPCCreds                 credentials.PerRPCCredentials
 	customDialOptions           []grpc.DialOption
+	customServerOptions         []grpc.ServerOption
 	resolverScheme              string
 	cliKeepAlive                *keepalive.ClientParameters
 	svrKeepAlive                *keepalive.ServerParameters
@@ -483,7 +489,7 @@ type test struct {
 	nonBlockingDial bool

 	// srv and srvAddr are set once startServer is called.
-	srv     *grpc.Server
+	srv     stopper
 	srvAddr string

 	// srvs and srvAddrs are set once startServers is called.
@@ -494,6 +500,11 @@ type test struct {
 	restoreLogs func() // nil unless declareLogNoise is used
 }

+type stopper interface {
+	Stop()
+	GracefulStop()
+}
+
 func (te *test) tearDown() {
 	if te.cancel != nil {
 		te.cancel()
@@ -546,6 +557,9 @@ func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network,
 	if te.maxServerSendMsgSize != nil {
 		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
 	}
+	if te.maxServerHeaderListSize != nil {
+		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
+	}
 	if te.tapHandle != nil {
 		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
 	}
@@ -596,11 +610,9 @@ func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network,
 	if te.svrKeepAlive != nil {
 		sopts = append(sopts, grpc.KeepaliveParams(*te.svrKeepAlive))
 	}
+	sopts = append(sopts, te.customServerOptions...)
 	s := grpc.NewServer(sopts...)
 	te.srv = s
-	if te.e.httpHandler {
-		internal.TestingUseHandlerImpl(s)
-	}
 	if te.healthServer != nil {
 		healthgrpc.RegisterHealthServer(s, te.healthServer)
 	}
@@ -618,11 +630,100 @@ func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network,
 		addr = "localhost:" + port
 	}

-	go s.Serve(lis)
 	te.srvAddr = addr
+
+	if te.e.httpHandler {
+		if te.e.security != "tls" {
+			te.t.Fatalf("unsupported environment settings")
+		}
+		cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key"))
+		if err != nil {
+			te.t.Fatal("Error creating TLS certificate: ", err)
+		}
+		hs := &http.Server{
+			Handler: s,
+		}
+		err = http2.ConfigureServer(hs, &http2.Server{
+			MaxConcurrentStreams: te.maxStream,
+		})
+		if err != nil {
+			te.t.Fatal("error starting http2 server: ", err)
+		}
+		hs.TLSConfig.Certificates = []tls.Certificate{cert}
+		tlsListener := tls.NewListener(lis, hs.TLSConfig)
+		whs := &wrapHS{Listener: tlsListener, s: hs, conns: make(map[net.Conn]bool)}
+		te.srv = whs
+		go hs.Serve(whs)
+
+		return lis
+	}
+
+	go s.Serve(lis)
 	return lis
 }

+// TODO: delete wrapHS and wrapConn when Go1.6 and Go1.7 support are gone and
+// call s.Close and s.Shutdown instead.
+type wrapHS struct {
+	sync.Mutex
+	net.Listener
+	s     *http.Server
+	conns map[net.Conn]bool
+}
+
+func (w *wrapHS) Accept() (net.Conn, error) {
+	c, err := w.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	w.Lock()
+	if w.conns == nil {
+		w.Unlock()
+		c.Close()
+		return nil, errors.New("connection after listener closed")
+	}
+	w.conns[&wrapConn{Conn: c, hs: w}] = true
+	w.Unlock()
+	return c, nil
+}
+
+func (w *wrapHS) Stop() {
+	w.Listener.Close()
+	w.Lock()
+	conns := w.conns
+	w.conns = nil
+	w.Unlock()
+	for c := range conns {
+		c.Close()
+	}
+}
+
+// Poll for now..
+func (w *wrapHS) GracefulStop() {
+	w.Listener.Close()
+	for {
+		w.Lock()
+		l := len(w.conns)
+		w.Unlock()
+		if l == 0 {
+			return
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+type wrapConn struct {
+	net.Conn
+	hs *wrapHS
+}
+
+func (w *wrapConn) Close() error {
+	w.hs.Lock()
+	delete(w.hs.conns, w.Conn)
+	w.hs.Unlock()
+	return w.Conn.Close()
+}
+
 func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
 	l := te.listenAndServe(ts, listenWithConnControl)
 	return l.(*listenerWrapper)
@@ -697,6 +798,9 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string)
 	if te.maxClientSendMsgSize != nil {
 		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
 	}
+	if te.maxClientHeaderListSize != nil {
+		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
+	}
 	switch te.e.security {
 	case "tls":
 		creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
@@ -706,6 +810,8 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string)
 		opts = append(opts, grpc.WithTransportCredentials(creds))
 	case "clientTimeoutCreds":
 		opts = append(opts, grpc.WithTransportCredentials(&clientTimeoutCreds{}))
+	case "empty":
+		// Don't add any transport creds option.
 	default:
 		opts = append(opts, grpc.WithInsecure())
 	}
@@ -1888,9 +1994,7 @@ func TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {

 	te.resolverScheme = r.Scheme()
 	te.nonBlockingDial = true
-	fmt.Println("1")
 	cc := te.clientConn()
-	fmt.Println("10")
 	tc := testpb.NewTestServiceClient(cc)

 	r.NewAddress([]resolver.Address{{Addr: te.srvAddr}})
@@ -2330,6 +2434,216 @@ func testHealthCheckOff(t *testing.T, e env) {
 	}
 }

+func TestHealthWatchMultipleClients(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testHealthWatchMultipleClients(t, e)
+	}
+}
+
+func testHealthWatchMultipleClients(t *testing.T, e env) {
+	const service = "grpc.health.v1.Health1"
+
+	hs := health.NewServer()
+
+	te := newTest(t, e)
+	te.healthServer = hs
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	hc := healthgrpc.NewHealthClient(cc)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	req := &healthpb.HealthCheckRequest{
+		Service: service,
+	}
+
+	stream1, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
+
+	stream2, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
+
+	hs.SetServingStatus(service, healthpb.HealthCheckResponse_NOT_SERVING)
+
+	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING)
+	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
+}
+
+func TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testHealthWatchSetServiceStatusBeforeStartingServer(t, e)
+	}
+}
+
+func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) {
+	const service = "grpc.health.v1.Health1"
+
+	hs := health.NewServer()
+
+	te := newTest(t, e)
+	te.healthServer = hs
+
+	hs.SetServingStatus(service, healthpb.HealthCheckResponse_SERVING)
+
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	hc := healthgrpc.NewHealthClient(cc)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	req := &healthpb.HealthCheckRequest{
+		Service: service,
+	}
+
+	stream, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
+}
+
+func TestHealthWatchDefaultStatusChange(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testHealthWatchDefaultStatusChange(t, e)
+	}
+}
+
+func testHealthWatchDefaultStatusChange(t *testing.T, e env) {
+	const service = "grpc.health.v1.Health1"
+
+	hs := health.NewServer()
+
+	te := newTest(t, e)
+	te.healthServer = hs
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	hc := healthgrpc.NewHealthClient(cc)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	req := &healthpb.HealthCheckRequest{
+		Service: service,
+	}
+
+	stream, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
+
+	hs.SetServingStatus(service, healthpb.HealthCheckResponse_SERVING)
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
+}
+
+func TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e)
+	}
+}
+
+func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) {
+	const service = "grpc.health.v1.Health1"
+
+	hs := health.NewServer()
+
+	te := newTest(t, e)
+	te.healthServer = hs
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	hc := healthgrpc.NewHealthClient(cc)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	req := &healthpb.HealthCheckRequest{
+		Service: service,
+	}
+
+	hs.SetServingStatus(service, healthpb.HealthCheckResponse_SERVING)
+
+	stream, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
+}
+
+func TestHealthWatchOverallServerHealthChange(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testHealthWatchOverallServerHealthChange(t, e)
+	}
+}
+
+func testHealthWatchOverallServerHealthChange(t *testing.T, e env) {
+	const service = ""
+
+	hs := health.NewServer()
+
+	te := newTest(t, e)
+	te.healthServer = hs
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	hc := healthgrpc.NewHealthClient(cc)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	req := &healthpb.HealthCheckRequest{
+		Service: service,
+	}
+
+	stream, err := hc.Watch(ctx, req)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
+
+	hs.SetServingStatus(service, healthpb.HealthCheckResponse_NOT_SERVING)
+
+	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
+}
+
+func healthWatchChecker(t *testing.T, stream healthpb.Health_WatchClient, expectedServingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	response, err := stream.Recv()
+	if err != nil {
+		t.Fatalf("error on %v.Recv(): %v", stream, err)
+	}
+	if response.Status != expectedServingStatus {
+		t.Fatalf("response.Status is %v (%v expected)", response.Status, expectedServingStatus)
+	}
+}
+
 func TestUnknownHandler(t *testing.T) {
 	defer leakcheck.Check(t)
 	// An example unknownHandler that returns a different code and a different method, making sure that we do not
@@ -2961,10 +3275,6 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
 	defer te.tearDown()
 	tc := testpb.NewTestServiceClient(te.clientConn())

-	const (
-		argSize  = 1
-		respSize = 1
-	)
 	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
@@ -3075,7 +3385,9 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
 		argSize  = 1
 		respSize = -1
 	)
-	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
 	stream, err := tc.FullDuplexCall(ctx)
 	if err != nil {
 		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
@@ -3151,20 +3463,20 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) {
 	}
 }

-func TestRetry(t *testing.T) {
+func TestTransparentRetry(t *testing.T) {
 	defer leakcheck.Check(t)
 	for _, e := range listTestEnv() {
 		if e.name == "handler-tls" {
 			// Fails with RST_STREAM / FLOW_CONTROL_ERROR
 			continue
 		}
-		testRetry(t, e)
+		testTransparentRetry(t, e)
 	}
 }

-// This test make sure RPCs are retried times when they receive a RST_STREAM
+// This test makes sure RPCs are retried times when they receive a RST_STREAM
 // with the REFUSED_STREAM error code, which the InTapHandle provokes.
-func testRetry(t *testing.T, e env) {
+func testTransparentRetry(t *testing.T, e env) {
 	te := newTest(t, e)
 	attempts := 0
 	successAttempt := 2
@@ -4559,9 +4871,7 @@ func TestCredsHandshakeAuthority(t *testing.T) {
 	}
 }

-type clientFailCreds struct {
-	got string
-}
+type clientFailCreds struct{}

 func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
 	return rawConn, nil, nil
@@ -4845,8 +5155,11 @@ type stubServer struct {

 	// A client connected to this service the test may use. Created in Start().
 	client testpb.TestServiceClient
+	cc     *grpc.ClientConn

 	cleanups []func() // Lambdas executed in Stop(); populated by Start().
+
+	r *manual.Resolver
 }

 func (ss *stubServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
@@ -4858,7 +5171,11 @@ func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallSer
 }

 // Start starts the server and creates a client connected to it.
-func (ss *stubServer) Start(sopts []grpc.ServerOption) error {
+func (ss *stubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error {
+	r, cleanup := manual.GenerateAndRegisterManualResolver()
+	ss.r = r
+	ss.cleanups = append(ss.cleanups, cleanup)
+
 	lis, err := net.Listen("tcp", "localhost:0")
 	if err != nil {
 		return fmt.Errorf(`net.Listen("tcp", "localhost:0") = %v`, err)
@@ -4870,16 +5187,40 @@ func (ss *stubServer) Start(sopts []grpc.ServerOption) error {
 	go s.Serve(lis)
 	ss.cleanups = append(ss.cleanups, s.Stop)

-	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
+	target := ss.r.Scheme() + ":///" + lis.Addr().String()
+
+	opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...)
+	cc, err := grpc.Dial(target, opts...)
 	if err != nil {
-		return fmt.Errorf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
+		return fmt.Errorf("grpc.Dial(%q) = %v", target, err)
 	}
+	ss.cc = cc
+	ss.r.NewAddress([]resolver.Address{{Addr: lis.Addr().String()}})
+	if err := ss.waitForReady(cc); err != nil {
+		return err
+	}
+
 	ss.cleanups = append(ss.cleanups, func() { cc.Close() })

 	ss.client = testpb.NewTestServiceClient(cc)
 	return nil
 }

+func (ss *stubServer) waitForReady(cc *grpc.ClientConn) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	for {
+		s := cc.GetState()
+		if s == connectivity.Ready {
+			return nil
+		}
+		if !cc.WaitForStateChange(ctx, s) {
+			// ctx got timeout or canceled.
+			return ctx.Err()
+		}
+	}
+}
+
 func (ss *stubServer) Stop() {
 	for i := len(ss.cleanups) - 1; i >= 0; i-- {
 		ss.cleanups[i]()
@@ -5125,7 +5466,7 @@ func TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
 	}
 	sopts := []grpc.ServerOption{}
 	if err := ss.Start(sopts); err != nil {
-		t.Fatalf("Error starting endpoing server: %v", err)
+		t.Fatalf("Error starting endpoint server: %v", err)
 	}
 	defer ss.Stop()
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -5143,7 +5484,6 @@
 		t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
 	}
 }
-
 }

 type windowSizeConfig struct {
@@ -6340,3 +6680,426 @@ func testRPCTimeout(t *testing.T, e env) {
 		cancel()
 	}
 }
+
+func TestDisabledIOBuffers(t *testing.T) {
+	defer leakcheck.Check(t)
+
+	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
+	if err != nil {
+		t.Fatalf("Failed to create payload: %v", err)
+	}
+	req := &testpb.StreamingOutputCallRequest{
+		Payload: payload,
+	}
+	resp := &testpb.StreamingOutputCallResponse{
+		Payload: payload,
+	}
+
+	ss := &stubServer{
+		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
+			for {
+				in, err := stream.Recv()
+				if err == io.EOF {
+					return nil
+				}
+				if err != nil {
+					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
+					return err
+				}
+				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
+					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
+					return err
+				}
+				if err := stream.Send(resp); err != nil {
+					t.Errorf("stream.Send(_)= %v, want <nil>", err)
+					return err
+				}
+
+			}
+		},
+	}
+
+	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
+	testpb.RegisterTestServiceServer(s, ss)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("Failed to create listener: %v", err)
+	}
+
+	done := make(chan struct{})
+	go func() {
+		s.Serve(lis)
+		close(done)
+	}()
+	defer s.Stop()
+	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer dcancel()
+	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
+	if err != nil {
+		t.Fatalf("Failed to dial server")
+	}
+	defer cc.Close()
+	c := testpb.NewTestServiceClient(cc)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	stream, err := c.FullDuplexCall(ctx, grpc.FailFast(false))
+	if err != nil {
+		t.Fatalf("Failed to send test RPC to server")
+	}
+	for i := 0; i < 10; i++ {
+		if err := stream.Send(req); err != nil {
+			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
+		}
+		in, err := stream.Recv()
+		if err != nil {
+			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
+		}
+		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
+			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
+		}
+	}
+	stream.CloseSend()
+	if _, err := stream.Recv(); err != io.EOF {
+		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
+	}
+}
+
+func TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.httpHandler {
+			continue
+		}
+		testServerMaxHeaderListSizeClientUserViolation(t, e)
+	}
+}
+
+func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.maxServerHeaderListSize = new(uint32)
+	*te.maxServerHeaderListSize = 216
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	tc := testpb.NewTestServiceClient(cc)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
+	var err error
+	if err = verifyResultWithDelay(func() (bool, error) {
+		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
+			return true, nil
+		}
+		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.httpHandler {
+			continue
+		}
+		testClientMaxHeaderListSizeServerUserViolation(t, e)
+	}
+}
+
+func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.maxClientHeaderListSize = new(uint32)
+	*te.maxClientHeaderListSize = 1 // any header server sends will violate
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	tc := testpb.NewTestServiceClient(cc)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	var err error
+	if err = verifyResultWithDelay(func() (bool, error) {
+		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
+			return true, nil
+		}
+		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.httpHandler || e.security == "tls" {
+			continue
+		}
+		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
+	}
+}
+
+func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.maxServerHeaderListSize = new(uint32)
+	*te.maxServerHeaderListSize = 512
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc, dw := te.clientConnWithConnControl()
+	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	stream, err := tc.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
+	}
+	rcw := dw.getRawConnWrapper()
+	val := make([]string, 512)
+	for i := range val {
+		val[i] = "a"
+	}
+	// allow for client to send the initial header
+	time.Sleep(100 * time.Millisecond)
+	rcw.writeHeaders(http2.HeadersFrameParam{
+		StreamID:      tc.getCurrentStreamID(),
+		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
+		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
+	}
+}
+
+func TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.httpHandler || e.security == "tls" {
+			continue
+		}
+		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
+	}
+}
+
+func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.maxClientHeaderListSize = new(uint32)
+	*te.maxClientHeaderListSize = 200
+	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
+	defer te.tearDown()
+	cc, _ := te.clientConnWithConnControl()
+	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	stream, err := tc.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
+	}
+	var i int
+	var rcw *rawConnWrapper
+	for i = 0; i < 100; i++ {
+		rcw = lw.getLastConn()
+		if rcw != nil {
+			break
+		}
+		time.Sleep(10 * time.Millisecond)
+		continue
+	}
+	if i == 100 {
+		t.Fatalf("failed to create server transport after 1s")
+	}
+
+	val := make([]string, 200)
+	for i := range val {
+		val[i] = "a"
+	}
+	// allow for client to send the initial header.
+	time.Sleep(100 * time.Millisecond)
+	rcw.writeHeaders(http2.HeadersFrameParam{
+		StreamID:      tc.getCurrentStreamID(),
+		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
+		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
+	}
+}
+
+func TestNetPipeConn(t *testing.T) {
+	// This test will block indefinitely if grpc writes both client and server
+	// prefaces without either reading from the Conn.
+	defer leakcheck.Check(t)
+	pl := testutils.NewPipeListener()
+	s := grpc.NewServer()
+	defer s.Stop()
+	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+		return &testpb.SimpleResponse{}, nil
+	}}
+	testpb.RegisterTestServiceServer(s, ts)
+	go s.Serve(pl)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer cc.Close()
+	client := testpb.NewTestServiceClient(cc)
+	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
+	}
+}
+
+func TestLargeTimeout(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		testLargeTimeout(t, e)
+	}
+}
+
+func testLargeTimeout(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.declareLogNoise("Server.processUnaryRPC failed to write status")
+
+	ts := &funcServer{}
+	te.startServer(ts)
+	defer te.tearDown()
+	tc := testpb.NewTestServiceClient(te.clientConn())
+
+	timeouts := []time.Duration{
+		time.Duration(math.MaxInt64), // will be (correctly) converted to
+		// 2562048 hours, which overflows upon converting back to an int64
+		2562047 * time.Hour, // the largest timeout that does not overflow
+	}
+
+	for i, maxTimeout := range timeouts {
+		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+			deadline, ok := ctx.Deadline()
+			timeout := deadline.Sub(time.Now())
+			minTimeout := maxTimeout - 5*time.Second
+			if !ok || timeout < minTimeout || timeout > maxTimeout {
+				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
+				return nil, status.Error(codes.OutOfRange, "deadline error")
+			}
+			return &testpb.SimpleResponse{}, nil
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
+		defer cancel()
+
+		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
+		}
+	}
+}
+
+// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This
+// test ensures that the connection is re-created after GO_AWAY and not affected by the
+// subsequent (old) connection closure.
+func TestGoAwayThenClose(t *testing.T) {
+	defer leakcheck.Check(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel()
+
+	lis1, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("Error while listening. Err: %v", err)
+	}
+	s1 := grpc.NewServer()
+	defer s1.Stop()
+	ts1 := &funcServer{
+		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+			return &testpb.SimpleResponse{}, nil
+		},
+		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
+			// Wait forever.
+			_, err := stream.Recv()
+			if err == nil {
+				t.Error("expected to never receive any message")
+			}
+			return err
+		},
+	}
+	testpb.RegisterTestServiceServer(s1, ts1)
+	go s1.Serve(lis1)
+
+	conn2Established := grpcsync.NewEvent()
+	lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established)
+	if err != nil {
+		t.Fatalf("Error while listening. Err: %v", err)
+	}
+	s2 := grpc.NewServer()
+	defer s2.Stop()
+	conn2Ready := grpcsync.NewEvent()
+	ts2 := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+		conn2Ready.Fire()
+		return &testpb.SimpleResponse{}, nil
+	}}
+	testpb.RegisterTestServiceServer(s2, ts2)
+	go s2.Serve(lis2)
+
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+	r.InitialAddrs([]resolver.Address{
+		{Addr: lis1.Addr().String()},
+		{Addr: lis2.Addr().String()},
+	})
+	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure())
+	if err != nil {
+		t.Fatalf("Error creating client: %v", err)
+	}
+	defer cc.Close()
+
+	client := testpb.NewTestServiceClient(cc)
+
+	// Should go on connection 1. We use a long-lived RPC because it will cause GracefulStop to send GO_AWAY, but the
+	// connection doesn't get closed until the server stops and the client receives.
+	stream, err := client.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err)
+	}
+
+	// Send GO_AWAY to connection 1.
+	go s1.GracefulStop()
+
+	// Wait for connection 2 to be established.
+	<-conn2Established.Done()
+
+	// Close connection 1.
+	s1.Stop()
+
+	// Wait for client to close.
+	_, err = stream.Recv()
+	if err == nil {
+		t.Fatal("expected the stream to die, but got a successful Recv")
+	}
+
+	// Do a bunch of RPCs, make sure it stays stable. These should go to connection 2.
+	for i := 0; i < 10; i++ {
+		if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
+		}
+	}
+}
+
+func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
+	lis, err := net.Listen(network, address)
+	if err != nil {
+		return nil, err
+	}
+	return notifyingListener{connEstablished: event, Listener: lis}, nil
+}
+
+type notifyingListener struct {
+	connEstablished *grpcsync.Event
+	net.Listener
+}
+
+func (lis notifyingListener) Accept() (net.Conn, error) {
+	defer lis.connEstablished.Fire()
+	return lis.Listener.Accept()
+}
47  vendor/google.golang.org/grpc/test/go_vet/vet.go  (generated, vendored, new file)
@@ -0,0 +1,47 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// vet.go is a script that checks whether files that are supposed to be built
// on appengine import unsupported packages (e.g. "unsafe", "syscall").
package main

import (
    "fmt"
    "go/build"
    "os"
)

func main() {
    b := build.Default
    b.BuildTags = []string{"appengine", "appenginevm"}
    argsWithoutProg := os.Args[1:]
    for _, dir := range argsWithoutProg {
        p, err := b.Import(".", dir, 0)
        if _, ok := err.(*build.NoGoError); ok {
            continue
        } else if err != nil {
            fmt.Printf("build.Import failed due to %v\n", err)
            continue
        }
        for _, pkg := range p.Imports {
            if pkg == "syscall" || pkg == "unsafe" {
                fmt.Printf("Package %s/%s importing %s package without appengine build tag is NOT ALLOWED!\n", p.Dir, p.Name, pkg)
            }
        }
    }
}
26
vendor/google.golang.org/grpc/test/rawConnWrapper.go
generated
vendored
26
vendor/google.golang.org/grpc/test/rawConnWrapper.go
generated
vendored
@ -161,24 +161,24 @@ func (rcw *rawConnWrapper) greet() error {
|
||||
func (rcw *rawConnWrapper) writePreface() error {
|
||||
n, err := rcw.cc.Write([]byte(http2.ClientPreface))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing client preface: %v", err)
|
||||
+		return fmt.Errorf("error writing client preface: %v", err)
	}
	if n != len(http2.ClientPreface) {
-		return fmt.Errorf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface))
+		return fmt.Errorf("writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface))
	}
	return nil
}

func (rcw *rawConnWrapper) writeInitialSettings() error {
	if err := rcw.fr.WriteSettings(); err != nil {
-		return fmt.Errorf("Error writing initial SETTINGS frame from client to server: %v", err)
+		return fmt.Errorf("error writing initial SETTINGS frame from client to server: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeSettingsAck() error {
	if err := rcw.fr.WriteSettingsAck(); err != nil {
-		return fmt.Errorf("Error writing ACK of server's SETTINGS: %v", err)
+		return fmt.Errorf("error writing ACK of server's SETTINGS: %v", err)
	}
	return nil
}
@ -186,7 +186,7 @@ func (rcw *rawConnWrapper) writeSettingsAck() error {
func (rcw *rawConnWrapper) wantSettings() (*http2.SettingsFrame, error) {
	f, err := rcw.readFrame()
	if err != nil {
-		return nil, fmt.Errorf("Error while expecting a SETTINGS frame: %v", err)
+		return nil, fmt.Errorf("error while expecting a SETTINGS frame: %v", err)
	}
	sf, ok := f.(*http2.SettingsFrame)
	if !ok {
@ -202,10 +202,10 @@ func (rcw *rawConnWrapper) wantSettingsAck() error {
	}
	sf, ok := f.(*http2.SettingsFrame)
	if !ok {
-		return fmt.Errorf("Wanting a settings ACK, received a %T", f)
+		return fmt.Errorf("wanting a settings ACK, received a %T", f)
	}
	if !sf.IsAck() {
-		return fmt.Errorf("Settings Frame didn't have ACK set")
+		return fmt.Errorf("settings Frame didn't have ACK set")
	}
	return nil
}
@ -304,42 +304,42 @@ func (rcw *rawConnWrapper) writeHeadersGRPC(streamID uint32, path string) {

func (rcw *rawConnWrapper) writeHeaders(p http2.HeadersFrameParam) error {
	if err := rcw.fr.WriteHeaders(p); err != nil {
-		return fmt.Errorf("Error writing HEADERS: %v", err)
+		return fmt.Errorf("error writing HEADERS: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeData(streamID uint32, endStream bool, data []byte) error {
	if err := rcw.fr.WriteData(streamID, endStream, data); err != nil {
-		return fmt.Errorf("Error writing DATA: %v", err)
+		return fmt.Errorf("error writing DATA: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeRSTStream(streamID uint32, code http2.ErrCode) error {
	if err := rcw.fr.WriteRSTStream(streamID, code); err != nil {
-		return fmt.Errorf("Error writing RST_STREAM: %v", err)
+		return fmt.Errorf("error writing RST_STREAM: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeDataPadded(streamID uint32, endStream bool, data, padding []byte) error {
	if err := rcw.fr.WriteDataPadded(streamID, endStream, data, padding); err != nil {
-		return fmt.Errorf("Error writing DATA with padding: %v", err)
+		return fmt.Errorf("error writing DATA with padding: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeGoAway(maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
	if err := rcw.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
-		return fmt.Errorf("Error writing GoAway: %v", err)
+		return fmt.Errorf("error writing GoAway: %v", err)
	}
	return nil
}

func (rcw *rawConnWrapper) writeRawFrame(t http2.FrameType, flags http2.Flags, streamID uint32, payload []byte) error {
	if err := rcw.fr.WriteRawFrame(t, flags, streamID, payload); err != nil {
-		return fmt.Errorf("Error writing Raw Frame: %v", err)
+		return fmt.Errorf("error writing Raw Frame: %v", err)
	}
	return nil
}
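The hunks above are mechanical style fixes: each error string loses its leading capital, following the Go convention (enforced by golint) that error strings should not be capitalized, since callers routinely wrap them mid-sentence. A minimal sketch of why the convention matters — this example is illustrative and not part of the commit:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	bad := errors.New("Something failed")  // reads oddly once wrapped
	good := errors.New("something failed") // composes cleanly in a chain
	fmt.Println(fmt.Errorf("writing frame: %v", bad))
	fmt.Println(fmt.Errorf("writing frame: %v", good))
}
```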
551
vendor/google.golang.org/grpc/test/retry_test.go
generated
vendored
Normal file
@ -0,0 +1,551 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package test

import (
	"fmt"
	"io"
	"os"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

func enableRetry() func() {
	old := envconfig.Retry
	envconfig.Retry = true
	return func() { envconfig.Retry = old }
}

func TestRetryUnary(t *testing.T) {
	defer enableRetry()()
	i := -1
	ss := &stubServer{
		emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
			i++
			switch i {
			case 0, 2, 5:
				return &testpb.Empty{}, nil
			case 6, 8, 11:
				return nil, status.New(codes.Internal, "non-retryable error").Err()
			}
			return nil, status.New(codes.AlreadyExists, "retryable error").Err()
		},
	}
	if err := ss.Start([]grpc.ServerOption{}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	ss.r.NewServiceConfig(`{
    "methodConfig": [{
      "name": [{"service": "grpc.testing.TestService"}],
      "waitForReady": true,
      "retryPolicy": {
        "MaxAttempts": 4,
        "InitialBackoff": ".01s",
        "MaxBackoff": ".01s",
        "BackoffMultiplier": 1.0,
        "RetryableStatusCodes": [ "ALREADY_EXISTS" ]
      }
    }]}`)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	for {
		if ctx.Err() != nil {
			t.Fatalf("Timed out waiting for service config update")
		}
		if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	cancel()

	testCases := []struct {
		code  codes.Code
		count int
	}{
		{codes.OK, 0},
		{codes.OK, 2},
		{codes.OK, 5},
		{codes.Internal, 6},
		{codes.Internal, 8},
		{codes.Internal, 11},
		{codes.AlreadyExists, 15},
	}
	for _, tc := range testCases {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) != tc.code {
			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <Code() = %v>", err, tc.code)
		}
		if i != tc.count {
			t.Fatalf("i = %v; want %v", i, tc.count)
		}
	}
}

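Under the retryPolicy above, each failed attempt with a retryable code is retried after a randomized backoff, up to MaxAttempts total attempts. A hedged sketch of the schedule described by the gRPC retry design — the function name and structure here are illustrative, not grpc-go's internals:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff returns the delay before retry n (n = 1 for the first retry):
// random(0, initial*multiplier^(n-1)), capped at max. Illustrative only.
func backoff(n int, initial, max time.Duration, multiplier float64) time.Duration {
	d := float64(initial)
	for i := 1; i < n; i++ {
		d *= multiplier
	}
	if d > float64(max) {
		d = float64(max)
	}
	return time.Duration(rand.Int63n(int64(d) + 1))
}

func main() {
	// The test's policy: MaxAttempts 4, .01s initial/max backoff, multiplier 1.
	for n := 1; n <= 3; n++ {
		fmt.Println(backoff(n, 10*time.Millisecond, 10*time.Millisecond, 1.0))
	}
}
```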
func TestRetryDisabledByDefault(t *testing.T) {
	if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") {
		return
	}
	i := -1
	ss := &stubServer{
		emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
			i++
			switch i {
			case 0:
				return nil, status.New(codes.AlreadyExists, "retryable error").Err()
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := ss.Start([]grpc.ServerOption{}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	ss.r.NewServiceConfig(`{
    "methodConfig": [{
      "name": [{"service": "grpc.testing.TestService"}],
      "waitForReady": true,
      "retryPolicy": {
        "MaxAttempts": 4,
        "InitialBackoff": ".01s",
        "MaxBackoff": ".01s",
        "BackoffMultiplier": 1.0,
        "RetryableStatusCodes": [ "ALREADY_EXISTS" ]
      }
    }]}`)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	for {
		if ctx.Err() != nil {
			t.Fatalf("Timed out waiting for service config update")
		}
		if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	cancel()

	testCases := []struct {
		code  codes.Code
		count int
	}{
		{codes.AlreadyExists, 0},
	}
	for _, tc := range testCases {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) != tc.code {
			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <Code() = %v>", err, tc.code)
		}
		if i != tc.count {
			t.Fatalf("i = %v; want %v", i, tc.count)
		}
	}
}

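Note the gating: at this point in grpc-go's history retry support was experimental and off by default, enabled only when the GRPC_GO_RETRY environment variable (surfaced internally as envconfig.Retry) was set to "on". That is why the other tests wrap themselves in enableRetry(), while TestRetryDisabledByDefault skips itself when the variable is already set and asserts that the retryable ALREADY_EXISTS error surfaces on the first attempt.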
func TestRetryThrottling(t *testing.T) {
	defer enableRetry()()
	i := -1
	ss := &stubServer{
		emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
			i++
			switch i {
			case 0, 3, 6, 10, 11, 12, 13, 14, 16, 18:
				return &testpb.Empty{}, nil
			}
			return nil, status.New(codes.Unavailable, "retryable error").Err()
		},
	}
	if err := ss.Start([]grpc.ServerOption{}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	ss.r.NewServiceConfig(`{
    "methodConfig": [{
      "name": [{"service": "grpc.testing.TestService"}],
      "waitForReady": true,
      "retryPolicy": {
        "MaxAttempts": 4,
        "InitialBackoff": ".01s",
        "MaxBackoff": ".01s",
        "BackoffMultiplier": 1.0,
        "RetryableStatusCodes": [ "UNAVAILABLE" ]
      }
    }],
    "retryThrottling": {
      "maxTokens": 10,
      "tokenRatio": 0.5
    }
  }`)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	for {
		if ctx.Err() != nil {
			t.Fatalf("Timed out waiting for service config update")
		}
		if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	cancel()

	testCases := []struct {
		code  codes.Code
		count int
	}{
		{codes.OK, 0},           // tokens = 10
		{codes.OK, 3},           // tokens = 8.5 (10 - 2 failures + 0.5 success)
		{codes.OK, 6},           // tokens = 6
		{codes.Unavailable, 8},  // tokens = 5 -- first attempt is retried; second aborted.
		{codes.Unavailable, 9},  // tokens = 4
		{codes.OK, 10},          // tokens = 4.5
		{codes.OK, 11},          // tokens = 5
		{codes.OK, 12},          // tokens = 5.5
		{codes.OK, 13},          // tokens = 6
		{codes.OK, 14},          // tokens = 6.5
		{codes.OK, 16},          // tokens = 5.5
		{codes.Unavailable, 17}, // tokens = 4.5
	}
	for _, tc := range testCases {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) != tc.code {
			t.Errorf("EmptyCall(_, _) = _, %v; want _, <Code() = %v>", err, tc.code)
		}
		if i != tc.count {
			t.Errorf("i = %v; want %v", i, tc.count)
		}
	}
}

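The token counts in the comments above follow the retryThrottling rules from the gRPC retry design: a retryable failure spends one token, a success refunds tokenRatio, and retries are suppressed while the count sits at or below half of maxTokens. A small sketch of that bookkeeping — the type and method names are illustrative, not grpc-go's internal throttle:

```go
package main

import "fmt"

// retryThrottle mirrors the config above: maxTokens 10, tokenRatio 0.5.
type retryThrottle struct {
	tokens, maxTokens, tokenRatio float64
}

// onFailure spends a token for a retryable failure, flooring at zero.
func (rt *retryThrottle) onFailure() {
	rt.tokens -= 1
	if rt.tokens < 0 {
		rt.tokens = 0
	}
}

// onSuccess refunds tokenRatio, capped at maxTokens.
func (rt *retryThrottle) onSuccess() {
	rt.tokens += rt.tokenRatio
	if rt.tokens > rt.maxTokens {
		rt.tokens = rt.maxTokens
	}
}

// retryAllowed reports whether a retry may proceed: throttled once the
// token count falls to maxTokens/2 or below.
func (rt *retryThrottle) retryAllowed() bool {
	return rt.tokens > rt.maxTokens/2
}

func main() {
	rt := &retryThrottle{tokens: 10, maxTokens: 10, tokenRatio: 0.5}
	rt.onFailure() // attempt i=1 fails
	rt.onFailure() // attempt i=2 fails
	rt.onSuccess() // attempt i=3 succeeds
	fmt.Println(rt.tokens, rt.retryAllowed()) // 8.5 true, matching the table
}
```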
func TestRetryStreaming(t *testing.T) {
	defer enableRetry()()
	req := func(b byte) *testpb.StreamingOutputCallRequest {
		return &testpb.StreamingOutputCallRequest{Payload: &testpb.Payload{Body: []byte{b}}}
	}
	res := func(b byte) *testpb.StreamingOutputCallResponse {
		return &testpb.StreamingOutputCallResponse{Payload: &testpb.Payload{Body: []byte{b}}}
	}

	largePayload, _ := newPayload(testpb.PayloadType_COMPRESSABLE, 500)

	type serverOp func(stream testpb.TestService_FullDuplexCallServer) error
	type clientOp func(stream testpb.TestService_FullDuplexCallClient) error

	// Server Operations
	sAttempts := func(n int) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			const key = "grpc-previous-rpc-attempts"
			md, ok := metadata.FromIncomingContext(stream.Context())
			if !ok {
				return status.Errorf(codes.Internal, "server: no header metadata received")
			}
			if got := md[key]; len(got) != 1 || got[0] != strconv.Itoa(n) {
				return status.Errorf(codes.Internal, "server: metadata = %v; want <contains %q: %q>", md, key, n)
			}
			return nil
		}
	}
	sReq := func(b byte) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			want := req(b)
			if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) {
				return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, <nil>", got, err, want)
			}
			return nil
		}
	}
	sReqPayload := func(p *testpb.Payload) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			want := &testpb.StreamingOutputCallRequest{Payload: p}
			if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) {
				return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, <nil>", got, err, want)
			}
			return nil
		}
	}
	sRes := func(b byte) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			msg := res(b)
			if err := stream.Send(msg); err != nil {
				return status.Errorf(codes.Internal, "server: Send(%v) = %v; want <nil>", msg, err)
			}
			return nil
		}
	}
	sErr := func(c codes.Code) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			return status.New(c, "").Err()
		}
	}
	sCloseSend := func() serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			if msg, err := stream.Recv(); msg != nil || err != io.EOF {
				return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want <nil>, io.EOF", msg, err)
			}
			return nil
		}
	}
	sPushback := func(s string) serverOp {
		return func(stream testpb.TestService_FullDuplexCallServer) error {
			stream.SetTrailer(metadata.MD{"grpc-retry-pushback-ms": []string{s}})
			return nil
		}
	}

	// Client Operations
	cReq := func(b byte) clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			msg := req(b)
			if err := stream.Send(msg); err != nil {
				return fmt.Errorf("client: Send(%v) = %v; want <nil>", msg, err)
			}
			return nil
		}
	}
	cReqPayload := func(p *testpb.Payload) clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			msg := &testpb.StreamingOutputCallRequest{Payload: p}
			if err := stream.Send(msg); err != nil {
				return fmt.Errorf("client: Send(%v) = %v; want <nil>", msg, err)
			}
			return nil
		}
	}
	cRes := func(b byte) clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			want := res(b)
			if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) {
				return fmt.Errorf("client: Recv() = %v, %v; want %v, <nil>", got, err, want)
			}
			return nil
		}
	}
	cErr := func(c codes.Code) clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			want := status.New(c, "").Err()
			if c == codes.OK {
				want = io.EOF
			}
			res, err := stream.Recv()
			if res != nil ||
				((err == nil) != (want == nil)) ||
				(want != nil && !reflect.DeepEqual(err, want)) {
				return fmt.Errorf("client: Recv() = %v, %v; want <nil>, %v", res, err, want)
			}
			return nil
		}
	}
	cCloseSend := func() clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			if err := stream.CloseSend(); err != nil {
				return fmt.Errorf("client: CloseSend() = %v; want <nil>", err)
			}
			return nil
		}
	}
	var curTime time.Time
	cGetTime := func() clientOp {
		return func(_ testpb.TestService_FullDuplexCallClient) error {
			curTime = time.Now()
			return nil
		}
	}
	cCheckElapsed := func(d time.Duration) clientOp {
		return func(_ testpb.TestService_FullDuplexCallClient) error {
			if elapsed := time.Since(curTime); elapsed < d {
				return fmt.Errorf("Elapsed time: %v; want >= %v", elapsed, d)
			}
			return nil
		}
	}
	cHdr := func() clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			_, err := stream.Header()
			return err
		}
	}
	cCtx := func() clientOp {
		return func(stream testpb.TestService_FullDuplexCallClient) error {
			stream.Context()
			return nil
		}
	}

	testCases := []struct {
		desc      string
		serverOps []serverOp
		clientOps []clientOp
	}{{
		desc:      "Non-retryable error code",
		serverOps: []serverOp{sReq(1), sErr(codes.Internal)},
		clientOps: []clientOp{cReq(1), cErr(codes.Internal)},
	}, {
		desc:      "One retry necessary",
		serverOps: []serverOp{sReq(1), sErr(codes.Unavailable), sReq(1), sAttempts(1), sRes(1)},
		clientOps: []clientOp{cReq(1), cRes(1), cErr(codes.OK)},
	}, {
		desc: "Exceed max attempts (4); check attempts header on server",
		serverOps: []serverOp{
			sReq(1), sErr(codes.Unavailable),
			sReq(1), sAttempts(1), sErr(codes.Unavailable),
			sAttempts(2), sReq(1), sErr(codes.Unavailable),
			sAttempts(3), sReq(1), sErr(codes.Unavailable),
		},
		clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)},
	}, {
		desc: "Multiple requests",
		serverOps: []serverOp{
			sReq(1), sReq(2), sErr(codes.Unavailable),
			sReq(1), sReq(2), sRes(5),
		},
		clientOps: []clientOp{cReq(1), cReq(2), cRes(5), cErr(codes.OK)},
	}, {
		desc: "Multiple successive requests",
		serverOps: []serverOp{
			sReq(1), sErr(codes.Unavailable),
			sReq(1), sReq(2), sErr(codes.Unavailable),
			sReq(1), sReq(2), sReq(3), sRes(5),
		},
		clientOps: []clientOp{cReq(1), cReq(2), cReq(3), cRes(5), cErr(codes.OK)},
	}, {
		desc: "No retry after receiving",
		serverOps: []serverOp{
			sReq(1), sErr(codes.Unavailable),
			sReq(1), sRes(3), sErr(codes.Unavailable),
		},
		clientOps: []clientOp{cReq(1), cRes(3), cErr(codes.Unavailable)},
	}, {
		desc:      "No retry after header",
		serverOps: []serverOp{sReq(1), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReq(1), cHdr(), cErr(codes.Unavailable)},
	}, {
		desc:      "No retry after context",
		serverOps: []serverOp{sReq(1), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReq(1), cCtx(), cErr(codes.Unavailable)},
	}, {
		desc: "Replaying close send",
		serverOps: []serverOp{
			sReq(1), sReq(2), sCloseSend(), sErr(codes.Unavailable),
			sReq(1), sReq(2), sCloseSend(), sRes(1), sRes(3), sRes(5),
		},
		clientOps: []clientOp{cReq(1), cReq(2), cCloseSend(), cRes(1), cRes(3), cRes(5), cErr(codes.OK)},
	}, {
		desc:      "Negative server pushback - no retry",
		serverOps: []serverOp{sReq(1), sPushback("-1"), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)},
	}, {
		desc:      "Non-numeric server pushback - no retry",
		serverOps: []serverOp{sReq(1), sPushback("xxx"), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)},
	}, {
		desc:      "Multiple server pushback values - no retry",
		serverOps: []serverOp{sReq(1), sPushback("100"), sPushback("10"), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)},
	}, {
		desc:      "1s server pushback - delayed retry",
		serverOps: []serverOp{sReq(1), sPushback("1000"), sErr(codes.Unavailable), sReq(1), sRes(2)},
		clientOps: []clientOp{cGetTime(), cReq(1), cRes(2), cCheckElapsed(time.Second), cErr(codes.OK)},
	}, {
		desc:      "Overflowing buffer - no retry",
		serverOps: []serverOp{sReqPayload(largePayload), sErr(codes.Unavailable)},
		clientOps: []clientOp{cReqPayload(largePayload), cErr(codes.Unavailable)},
	}}

	var serverOpIter int
	var serverOps []serverOp
	ss := &stubServer{
		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
			for serverOpIter < len(serverOps) {
				op := serverOps[serverOpIter]
				serverOpIter++
				if err := op(stream); err != nil {
					return err
				}
			}
			return nil
		},
	}
	if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200))); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	ss.r.NewServiceConfig(`{
    "methodConfig": [{
      "name": [{"service": "grpc.testing.TestService"}],
      "waitForReady": true,
      "retryPolicy": {
        "MaxAttempts": 4,
        "InitialBackoff": ".01s",
        "MaxBackoff": ".01s",
        "BackoffMultiplier": 1.0,
        "RetryableStatusCodes": [ "UNAVAILABLE" ]
      }
    }]}`)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	for {
		if ctx.Err() != nil {
			t.Fatalf("Timed out waiting for service config update")
		}
		if ss.cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	cancel()

	for _, tc := range testCases {
		func() {
			serverOpIter = 0
			serverOps = tc.serverOps

			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			stream, err := ss.client.FullDuplexCall(ctx)
			if err != nil {
				t.Fatalf("%v: Error while creating stream: %v", tc.desc, err)
			}
			for _, op := range tc.clientOps {
				if err := op(stream); err != nil {
					t.Errorf("%v: %v", tc.desc, err)
					break
				}
			}
			if serverOpIter != len(serverOps) {
				t.Errorf("%v: serverOpIter = %v; want %v", tc.desc, serverOpIter, len(serverOps))
			}
		}()
	}
}
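Several cases above exercise the grpc-retry-pushback-ms trailer, with which a server can override the client's backoff: a non-negative integer delays the next attempt by that many milliseconds, while a negative, non-numeric, or duplicated value cancels retries. A hedged sketch of how such a trailer value could be interpreted — illustrative, not grpc-go's actual parser:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parsePushback interprets a grpc-retry-pushback-ms trailer value.
// ok=false means the server told the client not to retry at all.
func parsePushback(v string) (d time.Duration, ok bool) {
	ms, err := strconv.Atoi(v)
	if err != nil || ms < 0 {
		return 0, false // "xxx" and "-1" both disable retries
	}
	return time.Duration(ms) * time.Millisecond, true
}

func main() {
	fmt.Println(parsePushback("1000")) // 1s true: delayed retry
	fmt.Println(parsePushback("-1"))   // 0s false: no retry
	fmt.Println(parsePushback("xxx"))  // 0s false: no retry
}
```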
34
vendor/google.golang.org/grpc/test/tools/tools.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
// +build tools

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// This package exists to cause `go mod` and `go get` to believe these tools
// are dependencies, even though they are not runtime dependencies of any grpc
// package. This means they will appear in our `go.mod` file, but will not be
// a part of the build.

package tools

import (
	_ "github.com/client9/misspell/cmd/misspell"
	_ "github.com/golang/lint/golint"
	_ "github.com/golang/protobuf/protoc-gen-go"
	_ "golang.org/x/tools/cmd/goimports"
	_ "honnef.co/go/tools/cmd/staticcheck"
)
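The `tools` build tag above is never set during a normal build, so the blank imports exist purely to record these developer tools as dependencies in `go.mod`; each can then be installed at the pinned version with `go install` on its import path.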