vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -66,6 +66,7 @@ import (
"google.golang.org/grpc/benchmark/latency"
"google.golang.org/grpc/benchmark/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/test/bufconn"
)
const (
@ -100,6 +101,7 @@ var (
memProfile, cpuProfile string
memProfileRate int
enableCompressor []bool
enableChannelz []bool
networkMode string
benchmarkResultFile string
networks = map[string]latency.Network{
@ -137,13 +139,31 @@ func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) {
)
}
sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
}))
opts = append(opts, grpc.WithInsecure())
target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...)
conn := bm.NewClientConn(target, opts...)
var lis net.Listener
if *useBufconn {
bcLis := bufconn.Listen(256 * 1024)
lis = bcLis
opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(
func(string, string, time.Duration) (net.Conn, error) {
return bcLis.Dial()
})("", "", 0)
}))
} else {
var err error
lis, err = net.Listen("tcp", "localhost:0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
}))
}
lis = nw.Listener(lis)
stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
conn := bm.NewClientConn("" /* target not used */, opts...)
tc := testpb.NewBenchmarkServiceClient(conn)
return func(int) {
unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
@ -154,6 +174,7 @@ func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) {
}
func makeFuncStream(benchFeatures stats.Features) (func(int), func()) {
// TODO: Refactor to remove duplication with makeFuncUnary.
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
opts := []grpc.DialOption{}
sopts := []grpc.ServerOption{}
@ -168,13 +189,31 @@ func makeFuncStream(benchFeatures stats.Features) (func(int), func()) {
)
}
sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
}))
opts = append(opts, grpc.WithInsecure())
target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...)
conn := bm.NewClientConn(target, opts...)
var lis net.Listener
if *useBufconn {
bcLis := bufconn.Listen(256 * 1024)
lis = bcLis
opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(
func(string, string, time.Duration) (net.Conn, error) {
return bcLis.Dial()
})("", "", 0)
}))
} else {
var err error
lis, err = net.Listen("tcp", "localhost:0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
}))
}
lis = nw.Listener(lis)
stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
conn := bm.NewClientConn("" /* target not used */, opts...)
tc := testpb.NewBenchmarkServiceClient(conn)
streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls)
for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
@ -240,18 +279,21 @@ func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), be
stopTimer(count)
}
var useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
// Initiate main function to get settings of features.
func init() {
var (
workloads, traceMode, compressorMode, readLatency string
readKbps, readMtu, readMaxConcurrentCalls intSliceType
readReqSizeBytes, readRespSizeBytes intSliceType
workloads, traceMode, compressorMode, readLatency, channelzOn string
readKbps, readMtu, readMaxConcurrentCalls intSliceType
readReqSizeBytes, readRespSizeBytes intSliceType
)
flag.StringVar(&workloads, "workloads", workloadsAll,
fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")))
flag.StringVar(&traceMode, "trace", modeOff,
fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", ")))
flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list")
flag.StringVar(&channelzOn, "channelz", modeOff, "whether channelz should be turned on")
flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark")
flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list")
flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list")
@ -287,6 +329,7 @@ func init() {
}
enableCompressor = setMode(compressorMode)
enableTrace = setMode(traceMode)
enableChannelz = setMode(channelzOn)
// Time input formats as (time + unit).
readTimeFromInput(&ltc, readLatency)
readIntFromIntSlice(&kbps, readKbps)
@ -360,10 +403,10 @@ func readTimeFromInput(values *[]time.Duration, replace string) {
func main() {
before()
featuresPos := make([]int, 8)
featuresPos := make([]int, 9)
// 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize
featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu),
len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor)}
len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor), len(enableChannelz)}
initalPos := make([]int, len(featuresPos))
s := stats.NewStats(10)
s.SortLatency()
@ -380,7 +423,7 @@ func main() {
}
var stopTimer = func(count int32) {
runtime.ReadMemStats(&memStats)
results = testing.BenchmarkResult{N: int(count), T: time.Now().Sub(startTime),
results = testing.BenchmarkResult{N: int(count), T: time.Since(startTime),
Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes}
}
sharedPos := make([]bool, len(featuresPos))
@ -404,9 +447,13 @@ func main() {
ReqSizeBytes: reqSizeBytes[featuresPos[5]],
RespSizeBytes: respSizeBytes[featuresPos[6]],
EnableCompressor: enableCompressor[featuresPos[7]],
EnableChannelz: enableChannelz[featuresPos[8]],
}
grpc.EnableTracing = enableTrace[featuresPos[0]]
if enableChannelz[featuresPos[8]] {
grpc.RegisterChannelz()
}
if runMode[0] {
unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
s.SetBenchmarkResult("Unary", benchFeature, results.N,

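The two additions above introduce a -bufconn flag (dial the server over an in-memory bufconn listener instead of TCP) and a -channelz flag (an extra boolean dimension appended to stats.Features and the feature grid). Below is a minimal, hypothetical sketch of the bufconn dial pattern used here, outside the benchmark harness; the target string is illustrative and not taken from this commit.

// Hypothetical program, not part of this commit: shows the bufconn dial
// pattern that the -bufconn flag above switches the benchmark onto.
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// In-memory listener; no system sockets are involved. 256 KiB matches
	// the buffer size used by the benchmark above.
	lis := bufconn.Listen(256 * 1024)

	// A server with no registered services is enough to accept the
	// transport-level connection for this sketch.
	s := grpc.NewServer()
	go s.Serve(lis)
	defer s.Stop()

	// The dialer ignores the target string and hands back an in-memory
	// pipe, mirroring the WithDialer closure in the diff above.
	conn, err := grpc.Dial("bufconn",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
			return lis.Dial()
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	log.Println("connected over bufconn")
}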
View File

@ -65,7 +65,6 @@ func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) {
}
p.Type = t
p.Body = body
return
}
func newPayload(t testpb.PayloadType, size int) *testpb.Payload {
@ -136,9 +135,6 @@ func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCa
// ServerInfo contains the information to create a gRPC benchmark server.
type ServerInfo struct {
// Addr is the address of the server.
Addr string
// Type is the type of the server.
// It should be "protobuf" or "bytebuf".
Type string
@ -148,21 +144,13 @@ type ServerInfo struct {
// For "bytebuf", it should be an int representing response size.
Metadata interface{}
// Network can simulate latency
Network *latency.Network
// Listener is the network listener for the server to use
Listener net.Listener
}
// StartServer starts a gRPC server serving a benchmark service according to info.
// It returns its listen address and a function to stop the server.
func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) {
lis, err := net.Listen("tcp", info.Addr)
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
nw := info.Network
if nw != nil {
lis = nw.Listener(lis)
}
// It returns a function to stop the server.
func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() {
opts = append(opts, grpc.WriteBufferSize(128*1024))
opts = append(opts, grpc.ReadBufferSize(128*1024))
s := grpc.NewServer(opts...)
@ -178,8 +166,8 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) {
default:
grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type)
}
go s.Serve(lis)
return lis.Addr().String(), func() {
go s.Serve(info.Listener)
return func() {
s.Stop()
}
}
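StartServer no longer listens itself: ServerInfo drops Addr and Network and gains a Listener, so callers create the listener, wrap it with the simulated network, and keep the address for the client side. The following is a hedged sketch of the new call pattern, modeled on runUnary below; it is a hypothetical file assumed to sit in this benchmark package, and the zero-valued latency.Network is illustrative.

// Hypothetical helper, not part of this commit; shows the post-change
// StartServer/NewClientConn wiring from runUnary.
package benchmark

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark/latency"
	"google.golang.org/grpc/grpclog"
)

func startLocalProtobufServer() (conn *grpc.ClientConn, stop func()) {
	// Zero values mean "no simulated latency"; real benchmarks plug in
	// the feature values under test.
	nw := &latency.Network{Kbps: 0, Latency: 0, MTU: 0}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		grpclog.Fatalf("Failed to listen: %v", err)
	}
	target := lis.Addr().String() // remember the real address before wrapping
	lis = nw.Listener(lis)        // inject the simulated network

	stop = StartServer(ServerInfo{Type: "protobuf", Listener: lis})
	conn = NewClientConn(target, grpc.WithInsecure())
	return conn, stop
}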
@ -238,9 +226,14 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli
// NewClientConn creates a gRPC client connection to addr.
func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn {
return NewClientConnWithContext(context.Background(), addr, opts...)
}
// NewClientConnWithContext creates a gRPC client connection to addr using ctx.
func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn {
opts = append(opts, grpc.WithWriteBufferSize(128*1024))
opts = append(opts, grpc.WithReadBufferSize(128*1024))
conn, err := grpc.Dial(addr, opts...)
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err)
}
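NewClientConn is now a thin wrapper over NewClientConnWithContext, which lets callers bound connection establishment. Below is a hypothetical caller, not from this commit, mirroring how the rewritten benchmark client further down dials with a deadline; the address is illustrative and assumes a server is already listening there.

// Hypothetical caller, not part of this commit.
package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
)

func main() {
	// Fail the blocking dial if the server cannot be reached within 5s.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn := benchmark.NewClientConnWithContext(ctx, "localhost:50051",
		grpc.WithInsecure(), grpc.WithBlock())
	defer conn.Close()
}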
@ -250,7 +243,13 @@ func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn {
func runUnary(b *testing.B, benchFeatures stats.Features) {
s := stats.AddStats(b, 38)
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
target := lis.Addr().String()
lis = nw.Listener(lis)
stopper := StartServer(ServerInfo{Type: "protobuf", Listener: lis}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
defer stopper()
conn := NewClientConn(
target, grpc.WithInsecure(),
@ -298,7 +297,13 @@ func runUnary(b *testing.B, benchFeatures stats.Features) {
func runStream(b *testing.B, benchFeatures stats.Features) {
s := stats.AddStats(b, 38)
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
target := lis.Addr().String()
lis = nw.Listener(lis)
stopper := StartServer(ServerInfo{Type: "protobuf", Listener: lis}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
defer stopper()
conn := NewClientConn(
target, grpc.WithInsecure(),

View File

@ -30,81 +30,81 @@ import (
func BenchmarkClientStreamc1(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false, false})
}
func BenchmarkClientStreamc8(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false, false})
}
func BenchmarkClientStreamc64(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false, false})
}
func BenchmarkClientStreamc512(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false, false})
}
func BenchmarkClientUnaryc1(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false, false})
}
func BenchmarkClientUnaryc8(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false, false})
}
func BenchmarkClientUnaryc64(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false, false})
}
func BenchmarkClientUnaryc512(b *testing.B) {
grpc.EnableTracing = true
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false})
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false, false})
}
func BenchmarkClientStreamNoTracec1(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false, false})
}
func BenchmarkClientStreamNoTracec8(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false, false})
}
func BenchmarkClientStreamNoTracec64(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false, false})
}
func BenchmarkClientStreamNoTracec512(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false, false})
}
func BenchmarkClientUnaryNoTracec1(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false, false})
}
func BenchmarkClientUnaryNoTracec8(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false, false})
}
func BenchmarkClientUnaryNoTracec64(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false, false})
}
func BenchmarkClientUnaryNoTracec512(b *testing.B) {
grpc.EnableTracing = false
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false, false})
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false, false})
}
func TestMain(m *testing.M) {

View File

@ -20,14 +20,15 @@ package main
import (
"flag"
"math"
"net"
"net/http"
_ "net/http/pprof"
"fmt"
"os"
"runtime"
"runtime/pprof"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"google.golang.org/grpc/benchmark"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
@ -36,145 +37,159 @@ import (
)
var (
server = flag.String("server", "", "The server address")
maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs")
duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client")
trace = flag.Bool("trace", true, "Whether tracing is on")
rpcType = flag.Int("rpc_type", 0,
port = flag.String("port", "50051", "Localhost port to connect to.")
numRPC = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
numConn = flag.Int("c", 1, "The number of parallel connections.")
warmupDur = flag.Int("w", 10, "Warm-up duration in seconds")
duration = flag.Int("d", 60, "Benchmark duration in seconds")
rqSize = flag.Int("req", 1, "Request message size in bytes.")
rspSize = flag.Int("resp", 1, "Response message size in bytes.")
rpcType = flag.String("rpc_type", "unary",
`Configure different client rpc type. Valid options are:
0 : unary call;
1 : streaming call.`)
unary;
streaming.`)
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
wg sync.WaitGroup
hopts = stats.HistogramOptions{
NumBuckets: 2495,
GrowthFactor: .01,
}
mu sync.Mutex
hists []*stats.Histogram
)
func unaryCaller(client testpb.BenchmarkServiceClient) {
benchmark.DoUnaryCall(client, 1, 1)
}
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) {
benchmark.DoStreamingRoundTrip(stream, 1, 1)
}
func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) {
s = stats.NewStats(256)
conn = benchmark.NewClientConn(*server)
tc = testpb.NewBenchmarkServiceClient(conn)
return s, conn, tc
}
func closeLoopUnary() {
s, conn, tc := buildConnection()
for i := 0; i < 100; i++ {
unaryCaller(tc)
}
ch := make(chan int, *maxConcurrentRPCs*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(*maxConcurrentRPCs)
for i := 0; i < *maxConcurrentRPCs; i++ {
go func() {
for range ch {
start := time.Now()
unaryCaller(tc)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
// Stop the client when time is up.
done := make(chan struct{})
go func() {
<-time.After(time.Duration(*duration) * time.Second)
close(done)
}()
ok := true
for ok {
select {
case ch <- 0:
case <-done:
ok = false
}
}
close(ch)
wg.Wait()
conn.Close()
grpclog.Println(s.String())
}
func closeLoopStream() {
s, conn, tc := buildConnection()
ch := make(chan int, *maxConcurrentRPCs*4)
var (
mu sync.Mutex
wg sync.WaitGroup
)
wg.Add(*maxConcurrentRPCs)
// Distribute RPCs over maxConcurrentCalls workers.
for i := 0; i < *maxConcurrentRPCs; i++ {
go func() {
stream, err := tc.StreamingCall(context.Background())
if err != nil {
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
}
// Do some warm up.
for i := 0; i < 100; i++ {
streamCaller(stream)
}
for range ch {
start := time.Now()
streamCaller(stream)
elapse := time.Since(start)
mu.Lock()
s.Add(elapse)
mu.Unlock()
}
wg.Done()
}()
}
// Stop the client when time is up.
done := make(chan struct{})
go func() {
<-time.After(time.Duration(*duration) * time.Second)
close(done)
}()
ok := true
for ok {
select {
case ch <- 0:
case <-done:
ok = false
}
}
close(ch)
wg.Wait()
conn.Close()
grpclog.Println(s.String())
}
func main() {
flag.Parse()
grpc.EnableTracing = *trace
go func() {
lis, err := net.Listen("tcp", ":0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
grpclog.Println("Client profiling address: ", lis.Addr().String())
if err := http.Serve(lis, nil); err != nil {
grpclog.Fatalf("Failed to serve: %v", err)
}
}()
switch *rpcType {
case 0:
closeLoopUnary()
case 1:
closeLoopStream()
if *testName == "" {
grpclog.Fatalf("test_name not set")
}
req := &testpb.SimpleRequest{
ResponseType: testpb.PayloadType_COMPRESSABLE,
ResponseSize: int32(*rspSize),
Payload: &testpb.Payload{
Type: testpb.PayloadType_COMPRESSABLE,
Body: make([]byte, *rqSize),
},
}
connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer connectCancel()
ccs := buildConnections(connectCtx)
warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
cf, err := os.Create("/tmp/" + *testName + ".cpu")
if err != nil {
grpclog.Fatalf("Error creating file: %v", err)
}
defer cf.Close()
pprof.StartCPUProfile(cf)
cpuBeg := getCPUTime()
for _, cc := range ccs {
runWithConn(cc, req, warmDeadline, endDeadline)
}
wg.Wait()
cpu := time.Duration(getCPUTime() - cpuBeg)
pprof.StopCPUProfile()
mf, err := os.Create("/tmp/" + *testName + ".mem")
if err != nil {
grpclog.Fatalf("Error creating file: %v", err)
}
defer mf.Close()
runtime.GC() // materialize all statistics
if err := pprof.WriteHeapProfile(mf); err != nil {
grpclog.Fatalf("Error writing memory profile: %v", err)
}
hist := stats.NewHistogram(hopts)
for _, h := range hists {
hist.Merge(h)
}
parseHist(hist)
fmt.Println("Client CPU utilization:", cpu)
fmt.Println("Client CPU profile:", cf.Name())
fmt.Println("Client Mem Profile:", mf.Name())
}
func buildConnections(ctx context.Context) []*grpc.ClientConn {
ccs := make([]*grpc.ClientConn, *numConn)
for i := range ccs {
ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock())
}
return ccs
}
func runWithConn(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
for i := 0; i < *numRPC; i++ {
wg.Add(1)
go func() {
defer wg.Done()
caller := makeCaller(cc, req)
hist := stats.NewHistogram(hopts)
for {
start := time.Now()
if start.After(endDeadline) {
mu.Lock()
hists = append(hists, hist)
mu.Unlock()
return
}
caller()
elapsed := time.Since(start)
if start.After(warmDeadline) {
hist.Add(elapsed.Nanoseconds())
}
}
}()
}
}
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
client := testpb.NewBenchmarkServiceClient(cc)
if *rpcType == "unary" {
return func() {
if _, err := client.UnaryCall(context.Background(), req); err != nil {
grpclog.Fatalf("RPC failed: %v", err)
}
}
}
stream, err := client.StreamingCall(context.Background())
if err != nil {
grpclog.Fatalf("RPC failed: %v", err)
}
return func() {
if err := stream.Send(req); err != nil {
grpclog.Fatalf("Streaming RPC failed to send: %v", err)
}
if _, err := stream.Recv(); err != nil {
grpclog.Fatalf("Streaming RPC failed to read: %v", err)
}
}
}
func parseHist(hist *stats.Histogram) {
fmt.Println("qps:", float64(hist.Count)/float64(*duration))
fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
time.Duration(median(.5, hist)),
time.Duration(median(.9, hist)),
time.Duration(median(.99, hist)))
}
func median(percentile float64, h *stats.Histogram) int64 {
need := int64(float64(h.Count) * percentile)
have := int64(0)
for _, bucket := range h.Buckets {
count := bucket.Count
if have+count >= need {
percent := float64(need-have) / float64(count)
return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
}
have += bucket.Count
}
panic("should have found a bound")
}
func getCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
grpclog.Fatal(err)
}
return ts.Nano()
}
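parseHist and median above report percentiles by linear interpolation inside the bucket that holds the target rank; each bucket spans LowBound to LowBound*(1+GrowthFactor). The following standalone sketch reproduces the same interpolation on plain slices; the bucket bounds and counts are made-up illustrative values.

// Hypothetical standalone sketch, not part of this commit.
package main

import "fmt"

// percentile mirrors the interpolation in median above: find the bucket
// holding the target rank, then interpolate between the bucket's lower
// bound and lowerBound * (1 + growthFactor).
func percentile(p float64, lowBounds []float64, counts []int64, growthFactor float64) float64 {
	var total int64
	for _, c := range counts {
		total += c
	}
	need := int64(float64(total) * p)
	var have int64
	for i, c := range counts {
		if have+c >= need {
			frac := float64(need-have) / float64(c)
			lo := lowBounds[i]
			hi := lo * (1.0 + growthFactor)
			return (1.0-frac)*lo + frac*hi
		}
		have += c
	}
	panic("should have found a bound")
}

func main() {
	// Illustrative buckets: lower bounds in nanoseconds, 1% growth factor.
	lowBounds := []float64{1000, 1010, 1020.1}
	counts := []int64{2, 6, 2}
	fmt.Printf("p50 ~ %.1f ns\n", percentile(0.5, lowBounds, counts, 0.01))
}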

File diff suppressed because it is too large

View File

@ -12,6 +12,12 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The type of payload that should be returned.
type PayloadType int32
@ -38,7 +44,9 @@ var PayloadType_value = map[string]int32{
func (x PayloadType) String() string {
return proto.EnumName(PayloadType_name, int32(x))
}
func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (PayloadType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{0}
}
// Compression algorithms
type CompressionType int32
@ -64,20 +72,44 @@ var CompressionType_value = map[string]int32{
func (x CompressionType) String() string {
return proto.EnumName(CompressionType_name, int32(x))
}
func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (CompressionType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{1}
}
// A block of data, to simply increase gRPC message size.
type Payload struct {
// The type of data in body.
Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"`
Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"`
// Primary contents of payload.
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Payload) Reset() { *m = Payload{} }
func (m *Payload) String() string { return proto.CompactTextString(m) }
func (*Payload) ProtoMessage() {}
func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Payload) Reset() { *m = Payload{} }
func (m *Payload) String() string { return proto.CompactTextString(m) }
func (*Payload) ProtoMessage() {}
func (*Payload) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{0}
}
func (m *Payload) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Payload.Unmarshal(m, b)
}
func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Payload.Marshal(b, m, deterministic)
}
func (dst *Payload) XXX_Merge(src proto.Message) {
xxx_messageInfo_Payload.Merge(dst, src)
}
func (m *Payload) XXX_Size() int {
return xxx_messageInfo_Payload.Size(m)
}
func (m *Payload) XXX_DiscardUnknown() {
xxx_messageInfo_Payload.DiscardUnknown(m)
}
var xxx_messageInfo_Payload proto.InternalMessageInfo
func (m *Payload) GetType() PayloadType {
if m != nil {
@ -96,14 +128,36 @@ func (m *Payload) GetBody() []byte {
// A protobuf representation for grpc status. This is used by test
// clients to specify a status that the server should attempt to return.
type EchoStatus struct {
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EchoStatus) Reset() { *m = EchoStatus{} }
func (m *EchoStatus) String() string { return proto.CompactTextString(m) }
func (*EchoStatus) ProtoMessage() {}
func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *EchoStatus) Reset() { *m = EchoStatus{} }
func (m *EchoStatus) String() string { return proto.CompactTextString(m) }
func (*EchoStatus) ProtoMessage() {}
func (*EchoStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{1}
}
func (m *EchoStatus) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EchoStatus.Unmarshal(m, b)
}
func (m *EchoStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EchoStatus.Marshal(b, m, deterministic)
}
func (dst *EchoStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_EchoStatus.Merge(dst, src)
}
func (m *EchoStatus) XXX_Size() int {
return xxx_messageInfo_EchoStatus.Size(m)
}
func (m *EchoStatus) XXX_DiscardUnknown() {
xxx_messageInfo_EchoStatus.DiscardUnknown(m)
}
var xxx_messageInfo_EchoStatus proto.InternalMessageInfo
func (m *EchoStatus) GetCode() int32 {
if m != nil {
@ -123,26 +177,48 @@ func (m *EchoStatus) GetMessage() string {
type SimpleRequest struct {
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"`
ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"`
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
// Whether SimpleResponse should include username.
FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"`
FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"`
// Whether SimpleResponse should include OAuth scope.
FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"`
FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"`
// Compression algorithm to be used by the server for the response (stream)
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
// Whether server should return a given status
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SimpleRequest) Reset() { *m = SimpleRequest{} }
func (m *SimpleRequest) String() string { return proto.CompactTextString(m) }
func (*SimpleRequest) ProtoMessage() {}
func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *SimpleRequest) Reset() { *m = SimpleRequest{} }
func (m *SimpleRequest) String() string { return proto.CompactTextString(m) }
func (*SimpleRequest) ProtoMessage() {}
func (*SimpleRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{2}
}
func (m *SimpleRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SimpleRequest.Unmarshal(m, b)
}
func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic)
}
func (dst *SimpleRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SimpleRequest.Merge(dst, src)
}
func (m *SimpleRequest) XXX_Size() int {
return xxx_messageInfo_SimpleRequest.Size(m)
}
func (m *SimpleRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SimpleRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo
func (m *SimpleRequest) GetResponseType() PayloadType {
if m != nil {
@ -196,18 +272,40 @@ func (m *SimpleRequest) GetResponseStatus() *EchoStatus {
// Unary response, as configured by the request.
type SimpleResponse struct {
// Payload to increase message size.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
// The user the request came from, for verifying authentication was
// successful when the client expected it.
Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"`
Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
// OAuth scope.
OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"`
OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SimpleResponse) Reset() { *m = SimpleResponse{} }
func (m *SimpleResponse) String() string { return proto.CompactTextString(m) }
func (*SimpleResponse) ProtoMessage() {}
func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
func (m *SimpleResponse) Reset() { *m = SimpleResponse{} }
func (m *SimpleResponse) String() string { return proto.CompactTextString(m) }
func (*SimpleResponse) ProtoMessage() {}
func (*SimpleResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{3}
}
func (m *SimpleResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SimpleResponse.Unmarshal(m, b)
}
func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic)
}
func (dst *SimpleResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SimpleResponse.Merge(dst, src)
}
func (m *SimpleResponse) XXX_Size() int {
return xxx_messageInfo_SimpleResponse.Size(m)
}
func (m *SimpleResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SimpleResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo
func (m *SimpleResponse) GetPayload() *Payload {
if m != nil {
@ -233,13 +331,35 @@ func (m *SimpleResponse) GetOauthScope() string {
// Client-streaming request.
type StreamingInputCallRequest struct {
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} }
func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallRequest) ProtoMessage() {}
func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} }
func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallRequest) ProtoMessage() {}
func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{4}
}
func (m *StreamingInputCallRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamingInputCallRequest.Unmarshal(m, b)
}
func (m *StreamingInputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamingInputCallRequest.Marshal(b, m, deterministic)
}
func (dst *StreamingInputCallRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamingInputCallRequest.Merge(dst, src)
}
func (m *StreamingInputCallRequest) XXX_Size() int {
return xxx_messageInfo_StreamingInputCallRequest.Size(m)
}
func (m *StreamingInputCallRequest) XXX_DiscardUnknown() {
xxx_messageInfo_StreamingInputCallRequest.DiscardUnknown(m)
}
var xxx_messageInfo_StreamingInputCallRequest proto.InternalMessageInfo
func (m *StreamingInputCallRequest) GetPayload() *Payload {
if m != nil {
@ -251,13 +371,35 @@ func (m *StreamingInputCallRequest) GetPayload() *Payload {
// Client-streaming response.
type StreamingInputCallResponse struct {
// Aggregated size of payloads received from the client.
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"`
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} }
func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallResponse) ProtoMessage() {}
func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} }
func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingInputCallResponse) ProtoMessage() {}
func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{5}
}
func (m *StreamingInputCallResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamingInputCallResponse.Unmarshal(m, b)
}
func (m *StreamingInputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamingInputCallResponse.Marshal(b, m, deterministic)
}
func (dst *StreamingInputCallResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamingInputCallResponse.Merge(dst, src)
}
func (m *StreamingInputCallResponse) XXX_Size() int {
return xxx_messageInfo_StreamingInputCallResponse.Size(m)
}
func (m *StreamingInputCallResponse) XXX_DiscardUnknown() {
xxx_messageInfo_StreamingInputCallResponse.DiscardUnknown(m)
}
var xxx_messageInfo_StreamingInputCallResponse proto.InternalMessageInfo
func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {
if m != nil {
@ -270,16 +412,38 @@ func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {
type ResponseParameters struct {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"`
Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"`
// Desired interval between consecutive responses in the response stream in
// microseconds.
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"`
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResponseParameters) Reset() { *m = ResponseParameters{} }
func (m *ResponseParameters) String() string { return proto.CompactTextString(m) }
func (*ResponseParameters) ProtoMessage() {}
func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
func (m *ResponseParameters) Reset() { *m = ResponseParameters{} }
func (m *ResponseParameters) String() string { return proto.CompactTextString(m) }
func (*ResponseParameters) ProtoMessage() {}
func (*ResponseParameters) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{6}
}
func (m *ResponseParameters) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResponseParameters.Unmarshal(m, b)
}
func (m *ResponseParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResponseParameters.Marshal(b, m, deterministic)
}
func (dst *ResponseParameters) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResponseParameters.Merge(dst, src)
}
func (m *ResponseParameters) XXX_Size() int {
return xxx_messageInfo_ResponseParameters.Size(m)
}
func (m *ResponseParameters) XXX_DiscardUnknown() {
xxx_messageInfo_ResponseParameters.DiscardUnknown(m)
}
var xxx_messageInfo_ResponseParameters proto.InternalMessageInfo
func (m *ResponseParameters) GetSize() int32 {
if m != nil {
@ -301,21 +465,43 @@ type StreamingOutputCallRequest struct {
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
// stream.
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
// Configuration for each expected response message.
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"`
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"`
// Optional input payload sent along with the request.
Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
// Compression algorithm to be used by the server for the response (stream)
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
// Whether server should return a given status
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} }
func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallRequest) ProtoMessage() {}
func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} }
func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} }
func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallRequest) ProtoMessage() {}
func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{7}
}
func (m *StreamingOutputCallRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamingOutputCallRequest.Unmarshal(m, b)
}
func (m *StreamingOutputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamingOutputCallRequest.Marshal(b, m, deterministic)
}
func (dst *StreamingOutputCallRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamingOutputCallRequest.Merge(dst, src)
}
func (m *StreamingOutputCallRequest) XXX_Size() int {
return xxx_messageInfo_StreamingOutputCallRequest.Size(m)
}
func (m *StreamingOutputCallRequest) XXX_DiscardUnknown() {
xxx_messageInfo_StreamingOutputCallRequest.DiscardUnknown(m)
}
var xxx_messageInfo_StreamingOutputCallRequest proto.InternalMessageInfo
func (m *StreamingOutputCallRequest) GetResponseType() PayloadType {
if m != nil {
@ -355,13 +541,35 @@ func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus {
// Server-streaming response, as configured by the request and parameters.
type StreamingOutputCallResponse struct {
// Payload to increase response size.
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} }
func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallResponse) ProtoMessage() {}
func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} }
func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} }
func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingOutputCallResponse) ProtoMessage() {}
func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{8}
}
func (m *StreamingOutputCallResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamingOutputCallResponse.Unmarshal(m, b)
}
func (m *StreamingOutputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamingOutputCallResponse.Marshal(b, m, deterministic)
}
func (dst *StreamingOutputCallResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamingOutputCallResponse.Merge(dst, src)
}
func (m *StreamingOutputCallResponse) XXX_Size() int {
return xxx_messageInfo_StreamingOutputCallResponse.Size(m)
}
func (m *StreamingOutputCallResponse) XXX_DiscardUnknown() {
xxx_messageInfo_StreamingOutputCallResponse.DiscardUnknown(m)
}
var xxx_messageInfo_StreamingOutputCallResponse proto.InternalMessageInfo
func (m *StreamingOutputCallResponse) GetPayload() *Payload {
if m != nil {
@ -373,13 +581,35 @@ func (m *StreamingOutputCallResponse) GetPayload() *Payload {
// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
type ReconnectParams struct {
MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"`
MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs,proto3" json:"max_reconnect_backoff_ms,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReconnectParams) Reset() { *m = ReconnectParams{} }
func (m *ReconnectParams) String() string { return proto.CompactTextString(m) }
func (*ReconnectParams) ProtoMessage() {}
func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} }
func (m *ReconnectParams) Reset() { *m = ReconnectParams{} }
func (m *ReconnectParams) String() string { return proto.CompactTextString(m) }
func (*ReconnectParams) ProtoMessage() {}
func (*ReconnectParams) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{9}
}
func (m *ReconnectParams) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReconnectParams.Unmarshal(m, b)
}
func (m *ReconnectParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReconnectParams.Marshal(b, m, deterministic)
}
func (dst *ReconnectParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReconnectParams.Merge(dst, src)
}
func (m *ReconnectParams) XXX_Size() int {
return xxx_messageInfo_ReconnectParams.Size(m)
}
func (m *ReconnectParams) XXX_DiscardUnknown() {
xxx_messageInfo_ReconnectParams.DiscardUnknown(m)
}
var xxx_messageInfo_ReconnectParams proto.InternalMessageInfo
func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 {
if m != nil {
@ -392,14 +622,36 @@ func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 {
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
type ReconnectInfo struct {
Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"`
BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"`
Passed bool `protobuf:"varint,1,opt,name=passed,proto3" json:"passed,omitempty"`
BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs,proto3" json:"backoff_ms,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} }
func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) }
func (*ReconnectInfo) ProtoMessage() {}
func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} }
func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} }
func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) }
func (*ReconnectInfo) ProtoMessage() {}
func (*ReconnectInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_messages_5c70222ad96bf232, []int{10}
}
func (m *ReconnectInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReconnectInfo.Unmarshal(m, b)
}
func (m *ReconnectInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReconnectInfo.Marshal(b, m, deterministic)
}
func (dst *ReconnectInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReconnectInfo.Merge(dst, src)
}
func (m *ReconnectInfo) XXX_Size() int {
return xxx_messageInfo_ReconnectInfo.Size(m)
}
func (m *ReconnectInfo) XXX_DiscardUnknown() {
xxx_messageInfo_ReconnectInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ReconnectInfo proto.InternalMessageInfo
func (m *ReconnectInfo) GetPassed() bool {
if m != nil {
@ -431,9 +683,9 @@ func init() {
proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value)
}
func init() { proto.RegisterFile("messages.proto", fileDescriptor1) }
func init() { proto.RegisterFile("messages.proto", fileDescriptor_messages_5c70222ad96bf232) }
var fileDescriptor1 = []byte{
var fileDescriptor_messages_5c70222ad96bf232 = []byte{
// 652 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89,

View File

@ -12,15 +12,43 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ByteBufferParams struct {
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"`
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} }
func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) }
func (*ByteBufferParams) ProtoMessage() {}
func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} }
func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) }
func (*ByteBufferParams) ProtoMessage() {}
func (*ByteBufferParams) Descriptor() ([]byte, []int) {
return fileDescriptor_payloads_3abc71de35f06c83, []int{0}
}
func (m *ByteBufferParams) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ByteBufferParams.Unmarshal(m, b)
}
func (m *ByteBufferParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ByteBufferParams.Marshal(b, m, deterministic)
}
func (dst *ByteBufferParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_ByteBufferParams.Merge(dst, src)
}
func (m *ByteBufferParams) XXX_Size() int {
return xxx_messageInfo_ByteBufferParams.Size(m)
}
func (m *ByteBufferParams) XXX_DiscardUnknown() {
xxx_messageInfo_ByteBufferParams.DiscardUnknown(m)
}
var xxx_messageInfo_ByteBufferParams proto.InternalMessageInfo
func (m *ByteBufferParams) GetReqSize() int32 {
if m != nil {
@ -37,14 +65,36 @@ func (m *ByteBufferParams) GetRespSize() int32 {
}
type SimpleProtoParams struct {
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"`
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"`
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} }
func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) }
func (*SimpleProtoParams) ProtoMessage() {}
func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} }
func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) }
func (*SimpleProtoParams) ProtoMessage() {}
func (*SimpleProtoParams) Descriptor() ([]byte, []int) {
return fileDescriptor_payloads_3abc71de35f06c83, []int{1}
}
func (m *SimpleProtoParams) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SimpleProtoParams.Unmarshal(m, b)
}
func (m *SimpleProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SimpleProtoParams.Marshal(b, m, deterministic)
}
func (dst *SimpleProtoParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_SimpleProtoParams.Merge(dst, src)
}
func (m *SimpleProtoParams) XXX_Size() int {
return xxx_messageInfo_SimpleProtoParams.Size(m)
}
func (m *SimpleProtoParams) XXX_DiscardUnknown() {
xxx_messageInfo_SimpleProtoParams.DiscardUnknown(m)
}
var xxx_messageInfo_SimpleProtoParams proto.InternalMessageInfo
func (m *SimpleProtoParams) GetReqSize() int32 {
if m != nil {
@ -61,38 +111,82 @@ func (m *SimpleProtoParams) GetRespSize() int32 {
}
type ComplexProtoParams struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} }
func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) }
func (*ComplexProtoParams) ProtoMessage() {}
func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} }
func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) }
func (*ComplexProtoParams) ProtoMessage() {}
func (*ComplexProtoParams) Descriptor() ([]byte, []int) {
return fileDescriptor_payloads_3abc71de35f06c83, []int{2}
}
func (m *ComplexProtoParams) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ComplexProtoParams.Unmarshal(m, b)
}
func (m *ComplexProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ComplexProtoParams.Marshal(b, m, deterministic)
}
func (dst *ComplexProtoParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_ComplexProtoParams.Merge(dst, src)
}
func (m *ComplexProtoParams) XXX_Size() int {
return xxx_messageInfo_ComplexProtoParams.Size(m)
}
func (m *ComplexProtoParams) XXX_DiscardUnknown() {
xxx_messageInfo_ComplexProtoParams.DiscardUnknown(m)
}
var xxx_messageInfo_ComplexProtoParams proto.InternalMessageInfo
type PayloadConfig struct {
// Types that are valid to be assigned to Payload:
// *PayloadConfig_BytebufParams
// *PayloadConfig_SimpleParams
// *PayloadConfig_ComplexParams
Payload isPayloadConfig_Payload `protobuf_oneof:"payload"`
Payload isPayloadConfig_Payload `protobuf_oneof:"payload"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PayloadConfig) Reset() { *m = PayloadConfig{} }
func (m *PayloadConfig) String() string { return proto.CompactTextString(m) }
func (*PayloadConfig) ProtoMessage() {}
func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
func (m *PayloadConfig) Reset() { *m = PayloadConfig{} }
func (m *PayloadConfig) String() string { return proto.CompactTextString(m) }
func (*PayloadConfig) ProtoMessage() {}
func (*PayloadConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_payloads_3abc71de35f06c83, []int{3}
}
func (m *PayloadConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayloadConfig.Unmarshal(m, b)
}
func (m *PayloadConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PayloadConfig.Marshal(b, m, deterministic)
}
func (dst *PayloadConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_PayloadConfig.Merge(dst, src)
}
func (m *PayloadConfig) XXX_Size() int {
return xxx_messageInfo_PayloadConfig.Size(m)
}
func (m *PayloadConfig) XXX_DiscardUnknown() {
xxx_messageInfo_PayloadConfig.DiscardUnknown(m)
}
var xxx_messageInfo_PayloadConfig proto.InternalMessageInfo
type isPayloadConfig_Payload interface {
isPayloadConfig_Payload()
}
type PayloadConfig_BytebufParams struct {
BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"`
BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,proto3,oneof"`
}
type PayloadConfig_SimpleParams struct {
SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"`
SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,proto3,oneof"`
}
type PayloadConfig_ComplexParams struct {
ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"`
ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,proto3,oneof"`
}
func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {}
@ -200,17 +294,17 @@ func _PayloadConfig_OneofSizer(msg proto.Message) (n int) {
switch x := m.Payload.(type) {
case *PayloadConfig_BytebufParams:
s := proto.Size(x.BytebufParams)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *PayloadConfig_SimpleParams:
s := proto.Size(x.SimpleParams)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *PayloadConfig_ComplexParams:
s := proto.Size(x.ComplexParams)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
@ -227,9 +321,9 @@ func init() {
proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig")
}
func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) }
func init() { proto.RegisterFile("payloads.proto", fileDescriptor_payloads_3abc71de35f06c83) }
var fileDescriptor2 = []byte{
var fileDescriptor_payloads_3abc71de35f06c83 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc,
0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48,


@ -17,6 +17,12 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
@ -25,8 +31,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for BenchmarkService service
// BenchmarkServiceClient is the client API for BenchmarkService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BenchmarkServiceClient interface {
// One request followed by one response.
// The server returns the client payload as-is.
@ -46,7 +53,7 @@ func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient {
func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {
out := new(SimpleResponse)
err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, opts...)
if err != nil {
return nil, err
}
@ -54,7 +61,7 @@ func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleReques
}
func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) {
stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...)
stream, err := c.cc.NewStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], "/grpc.testing.BenchmarkService/StreamingCall", opts...)
if err != nil {
return nil, err
}
@ -84,8 +91,7 @@ func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) {
return m, nil
}
// Server API for BenchmarkService service
// BenchmarkServiceServer is the server API for BenchmarkService service.
type BenchmarkServiceServer interface {
// One request followed by one response.
// The server returns the client payload as-is.
@ -163,8 +169,9 @@ var _BenchmarkService_serviceDesc = grpc.ServiceDesc{
Metadata: "services.proto",
}
// Client API for WorkerService service
// WorkerServiceClient is the client API for WorkerService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type WorkerServiceClient interface {
// Start server with specified workload.
// First request sent specifies the ServerConfig followed by ServerStatus
@ -195,7 +202,7 @@ func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient {
}
func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) {
stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...)
stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[0], "/grpc.testing.WorkerService/RunServer", opts...)
if err != nil {
return nil, err
}
@ -226,7 +233,7 @@ func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) {
}
func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) {
stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...)
stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[1], "/grpc.testing.WorkerService/RunClient", opts...)
if err != nil {
return nil, err
}
@ -258,7 +265,7 @@ func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) {
func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) {
out := new(CoreResponse)
err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, opts...)
if err != nil {
return nil, err
}
@ -267,15 +274,14 @@ func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, op
func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) {
out := new(Void)
err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for WorkerService service
// WorkerServiceServer is the server API for WorkerService service.
type WorkerServiceServer interface {
// Start server with specified workload.
// First request sent specifies the ServerConfig followed by ServerStatus
@ -419,9 +425,9 @@ var _WorkerService_serviceDesc = grpc.ServiceDesc{
Metadata: "services.proto",
}
func init() { proto.RegisterFile("services.proto", fileDescriptor3) }
func init() { proto.RegisterFile("services.proto", fileDescriptor_services_bf68f4d7cbd0e0a1) }
var fileDescriptor3 = []byte{
var fileDescriptor_services_bf68f4d7cbd0e0a1 = []byte{
// 255 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17,


@ -12,20 +12,48 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ServerStats struct {
// wall clock time change in seconds since last reset
TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"`
TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"`
// change in user time (in seconds) used by the server since last reset
TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"`
TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"`
// change in server time (in seconds) used by the server process and all
// threads since last reset
TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"`
TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerStats) Reset() { *m = ServerStats{} }
func (m *ServerStats) String() string { return proto.CompactTextString(m) }
func (*ServerStats) ProtoMessage() {}
func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
func (m *ServerStats) Reset() { *m = ServerStats{} }
func (m *ServerStats) String() string { return proto.CompactTextString(m) }
func (*ServerStats) ProtoMessage() {}
func (*ServerStats) Descriptor() ([]byte, []int) {
return fileDescriptor_stats_8ba831c0cb3c3440, []int{0}
}
func (m *ServerStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerStats.Unmarshal(m, b)
}
func (m *ServerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerStats.Marshal(b, m, deterministic)
}
func (dst *ServerStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerStats.Merge(dst, src)
}
func (m *ServerStats) XXX_Size() int {
return xxx_messageInfo_ServerStats.Size(m)
}
func (m *ServerStats) XXX_DiscardUnknown() {
xxx_messageInfo_ServerStats.DiscardUnknown(m)
}
var xxx_messageInfo_ServerStats proto.InternalMessageInfo
func (m *ServerStats) GetTimeElapsed() float64 {
if m != nil {
@ -50,14 +78,36 @@ func (m *ServerStats) GetTimeSystem() float64 {
// Histogram params based on grpc/support/histogram.c
type HistogramParams struct {
Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"`
MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"`
Resolution float64 `protobuf:"fixed64,1,opt,name=resolution,proto3" json:"resolution,omitempty"`
MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible,proto3" json:"max_possible,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HistogramParams) Reset() { *m = HistogramParams{} }
func (m *HistogramParams) String() string { return proto.CompactTextString(m) }
func (*HistogramParams) ProtoMessage() {}
func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
func (m *HistogramParams) Reset() { *m = HistogramParams{} }
func (m *HistogramParams) String() string { return proto.CompactTextString(m) }
func (*HistogramParams) ProtoMessage() {}
func (*HistogramParams) Descriptor() ([]byte, []int) {
return fileDescriptor_stats_8ba831c0cb3c3440, []int{1}
}
func (m *HistogramParams) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HistogramParams.Unmarshal(m, b)
}
func (m *HistogramParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HistogramParams.Marshal(b, m, deterministic)
}
func (dst *HistogramParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_HistogramParams.Merge(dst, src)
}
func (m *HistogramParams) XXX_Size() int {
return xxx_messageInfo_HistogramParams.Size(m)
}
func (m *HistogramParams) XXX_DiscardUnknown() {
xxx_messageInfo_HistogramParams.DiscardUnknown(m)
}
var xxx_messageInfo_HistogramParams proto.InternalMessageInfo
func (m *HistogramParams) GetResolution() float64 {
if m != nil {
@ -75,18 +125,40 @@ func (m *HistogramParams) GetMaxPossible() float64 {
// Histogram data based on grpc/support/histogram.c
type HistogramData struct {
Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket" json:"bucket,omitempty"`
MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"`
MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"`
Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"`
SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"`
Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"`
Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket,proto3" json:"bucket,omitempty"`
MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen,proto3" json:"min_seen,omitempty"`
MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen,proto3" json:"max_seen,omitempty"`
Sum float64 `protobuf:"fixed64,4,opt,name=sum,proto3" json:"sum,omitempty"`
SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares,proto3" json:"sum_of_squares,omitempty"`
Count float64 `protobuf:"fixed64,6,opt,name=count,proto3" json:"count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HistogramData) Reset() { *m = HistogramData{} }
func (m *HistogramData) String() string { return proto.CompactTextString(m) }
func (*HistogramData) ProtoMessage() {}
func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
func (m *HistogramData) Reset() { *m = HistogramData{} }
func (m *HistogramData) String() string { return proto.CompactTextString(m) }
func (*HistogramData) ProtoMessage() {}
func (*HistogramData) Descriptor() ([]byte, []int) {
return fileDescriptor_stats_8ba831c0cb3c3440, []int{2}
}
func (m *HistogramData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HistogramData.Unmarshal(m, b)
}
func (m *HistogramData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HistogramData.Marshal(b, m, deterministic)
}
func (dst *HistogramData) XXX_Merge(src proto.Message) {
xxx_messageInfo_HistogramData.Merge(dst, src)
}
func (m *HistogramData) XXX_Size() int {
return xxx_messageInfo_HistogramData.Size(m)
}
func (m *HistogramData) XXX_DiscardUnknown() {
xxx_messageInfo_HistogramData.DiscardUnknown(m)
}
var xxx_messageInfo_HistogramData proto.InternalMessageInfo
func (m *HistogramData) GetBucket() []uint32 {
if m != nil {
@ -132,17 +204,39 @@ func (m *HistogramData) GetCount() float64 {
type ClientStats struct {
// Latency histogram. Data points are in nanoseconds.
Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"`
Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies,proto3" json:"latencies,omitempty"`
// See ServerStats for details.
TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"`
TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"`
TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"`
TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"`
TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"`
TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) {
return fileDescriptor_stats_8ba831c0cb3c3440, []int{3}
}
func (m *ClientStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ClientStats.Unmarshal(m, b)
}
func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
}
func (dst *ClientStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClientStats.Merge(dst, src)
}
func (m *ClientStats) XXX_Size() int {
return xxx_messageInfo_ClientStats.Size(m)
}
func (m *ClientStats) XXX_DiscardUnknown() {
xxx_messageInfo_ClientStats.DiscardUnknown(m)
}
var xxx_messageInfo_ClientStats proto.InternalMessageInfo
func (m *ClientStats) GetLatencies() *HistogramData {
if m != nil {
@ -179,9 +273,9 @@ func init() {
proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats")
}
func init() { proto.RegisterFile("stats.proto", fileDescriptor4) }
func init() { proto.RegisterFile("stats.proto", fileDescriptor_stats_8ba831c0cb3c3440) }
var fileDescriptor4 = []byte{
var fileDescriptor_stats_8ba831c0cb3c3440 = []byte{
// 341 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40,
0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c,

vendor/google.golang.org/grpc/benchmark/run_bench.sh generated vendored Executable file

@ -0,0 +1,187 @@
#!/bin/bash
rpcs=(1)
conns=(1)
warmup=10
dur=10
reqs=(1)
resps=(1)
rpc_types=(unary)
# idx[0] = idx value for rpcs
# idx[1] = idx value for conns
# idx[2] = idx value for reqs
# idx[3] = idx value for resps
# idx[4] = idx value for rpc_types
idx=(0 0 0 0 0)
idx_max=(1 1 1 1 1)
inc()
{
for i in $(seq $((${#idx[@]}-1)) -1 0); do
idx[${i}]=$((${idx[${i}]}+1))
if [ ${idx[${i}]} == ${idx_max[${i}]} ]; then
idx[${i}]=0
else
break
fi
done
local fin
fin=1
# Check to see if we have looped back to the beginning.
for v in ${idx[@]}; do
if [ ${v} != 0 ]; then
fin=0
break
fi
done
if [ ${fin} == 1 ]; then
rm -Rf ${out_dir}
clean_and_die 0
fi
}
clean_and_die() {
rm -Rf ${out_dir}
exit $1
}
run(){
local nr
nr=${rpcs[${idx[0]}]}
local nc
nc=${conns[${idx[1]}]}
req_sz=${reqs[${idx[2]}]}
resp_sz=${resps[${idx[3]}]}
r_type=${rpc_types[${idx[4]}]}
# Following runs one benchmark
base_port=50051
delta=0
test_name="r_"${nr}"_c_"${nc}"_req_"${req_sz}"_resp_"${resp_sz}"_"${r_type}"_"$(date +%s)
echo "================================================================================"
echo ${test_name}
while :
do
port=$((${base_port}+${delta}))
# Launch the server in background
${out_dir}/server --port=${port} --test_name="Server_"${test_name}&
server_pid=$(echo $!)
# Launch the client
${out_dir}/client --port=${port} --d=${dur} --w=${warmup} --r=${nr} --c=${nc} --req=${req_sz} --resp=${resp_sz} --rpc_type=${r_type} --test_name="client_"${test_name}
client_status=$(echo $?)
kill ${server_pid}
wait ${server_pid}
if [ ${client_status} == 0 ]; then
break
fi
delta=$((${delta}+1))
if [ ${delta} == 10 ]; then
echo "Continuous 10 failed runs. Exiting now."
rm -Rf ${out_dir}
clean_and_die 1
fi
done
}
set_param(){
local argname=$1
shift
local idx=$1
shift
if [ $# -eq 0 ]; then
echo "${argname} not specified"
exit 1
fi
PARAM=($(echo $1 | sed 's/,/ /g'))
if [ ${idx} -lt 0 ]; then
return
fi
idx_max[${idx}]=${#PARAM[@]}
}
while [ $# -gt 0 ]; do
case "$1" in
-r)
shift
set_param "number of rpcs" 0 $1
rpcs=(${PARAM[@]})
shift
;;
-c)
shift
set_param "number of connections" 1 $1
conns=(${PARAM[@]})
shift
;;
-w)
shift
set_param "warm-up period" -1 $1
warmup=${PARAM}
shift
;;
-d)
shift
set_param "duration" -1 $1
dur=${PARAM}
shift
;;
-req)
shift
set_param "request size" 2 $1
reqs=(${PARAM[@]})
shift
;;
-resp)
shift
set_param "response size" 3 $1
resps=(${PARAM[@]})
shift
;;
-rpc_type)
shift
set_param "rpc type" 4 $1
rpc_types=(${PARAM[@]})
shift
;;
-h|--help)
echo "Following are valid options:"
echo
echo "-h, --help show brief help"
echo "-w warm-up duration in seconds, default value is 10"
echo "-d benchmark duration in seconds, default value is 60"
echo ""
echo "Each of the following can have multiple comma separated values."
echo ""
echo "-r number of RPCs, default value is 1"
echo "-c number of Connections, default value is 1"
echo "-req req size in bytes, default value is 1"
echo "-resp resp size in bytes, default value is 1"
echo "-rpc_type valid values are unary|streaming, default is unary"
;;
*)
echo "Incorrect option $1"
exit 1
;;
esac
done
# Build server and client
out_dir=$(mktemp -d oss_benchXXX)
go build -o ${out_dir}/server $GOPATH/src/google.golang.org/grpc/benchmark/server/main.go && go build -o ${out_dir}/client $GOPATH/src/google.golang.org/grpc/benchmark/client/main.go
if [ $? != 0 ]; then
clean_and_die 1
fi
while :
do
run
inc
done
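As an illustration (the values below are hypothetical; only the flags defined above exist), the script accepts comma-separated values for each swept dimension and runs one server/client pair per combination, e.g.:
./run_bench.sh -r 1,10 -c 1 -w 10 -d 30 -req 1024 -resp 1024 -rpc_type unary,streaming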


@ -20,32 +20,71 @@ package main
import (
"flag"
"math"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"syscall"
"time"
"golang.org/x/sys/unix"
"google.golang.org/grpc/benchmark"
"google.golang.org/grpc/grpclog"
)
var duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server")
var (
port = flag.String("port", "50051", "Localhost port to listen on.")
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
)
func main() {
flag.Parse()
go func() {
lis, err := net.Listen("tcp", ":0")
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
grpclog.Println("Server profiling address: ", lis.Addr().String())
if err := http.Serve(lis, nil); err != nil {
grpclog.Fatalf("Failed to serve: %v", err)
}
}()
addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces
grpclog.Println("Server Address: ", addr)
<-time.After(time.Duration(*duration) * time.Second)
stopper()
if *testName == "" {
grpclog.Fatalf("test name not set")
}
lis, err := net.Listen("tcp", ":"+*port)
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
defer lis.Close()
cf, err := os.Create("/tmp/" + *testName + ".cpu")
if err != nil {
grpclog.Fatalf("Failed to create file: %v", err)
}
defer cf.Close()
pprof.StartCPUProfile(cf)
cpuBeg := getCPUTime()
// Launch server in a separate goroutine.
stop := benchmark.StartServer(benchmark.ServerInfo{Type: "protobuf", Listener: lis})
// Wait on OS terminate signal.
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGTERM)
<-ch
cpu := time.Duration(getCPUTime() - cpuBeg)
stop()
pprof.StopCPUProfile()
mf, err := os.Create("/tmp/" + *testName + ".mem")
if err != nil {
grpclog.Fatalf("Failed to create file: %v", err)
}
defer mf.Close()
runtime.GC() // materialize all statistics
if err := pprof.WriteHeapProfile(mf); err != nil {
grpclog.Fatalf("Failed to write memory profile: %v", err)
}
fmt.Println("Server CPU utilization:", cpu)
fmt.Println("Server CPU profile:", cf.Name())
fmt.Println("Server Mem Profile:", mf.Name())
}
func getCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
grpclog.Fatal(err)
}
return ts.Nano()
}
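Usage note: the server profiles itself until it receives SIGTERM (run_bench.sh sends this by killing the server PID) and then writes /tmp/<test_name>.cpu and /tmp/<test_name>.mem. The resulting files can be examined with the standard pprof tooling, for example go tool pprof /tmp/Server_<test_name>.cpu, where <test_name> is whatever was passed via --test_name.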


@ -39,6 +39,7 @@ type Features struct {
ReqSizeBytes int
RespSizeBytes int
EnableCompressor bool
EnableChannelz bool
}
// String returns the textual output of the Features as string.
@ -48,6 +49,13 @@ func (f Features) String() string {
f.Latency.String(), f.Kbps, f.Mtu, f.MaxConcurrentCalls, f.ReqSizeBytes, f.RespSizeBytes, f.EnableCompressor)
}
// ConciseString returns the concise textual output of the Features as a string,
// skipping settings that have their default value.
func (f Features) ConciseString() string {
noneEmptyPos := []bool{f.EnableTrace, f.Latency != 0, f.Kbps != 0, f.Mtu != 0, true, true, true, f.EnableCompressor, f.EnableChannelz}
return PartialPrintString(noneEmptyPos, f, false)
}
// PartialPrintString can print certain features with different format.
func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string {
s := ""
@ -63,7 +71,7 @@ func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string {
linker = "_"
}
if noneEmptyPos[0] {
s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableCompressor, suffix)
s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableTrace, suffix)
}
if shared && f.NetworkMode != "" {
s += fmt.Sprintf("Network: %s \n", f.NetworkMode)
@ -92,6 +100,9 @@ func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string {
if noneEmptyPos[7] {
s += fmt.Sprintf("%sCompressor%s%t%s", prefix, linker, f.EnableCompressor, suffix)
}
if noneEmptyPos[8] {
s += fmt.Sprintf("%sChannelz%s%t%s", prefix, linker, f.EnableChannelz, suffix)
}
return s
}
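For illustration, a minimal sketch of how ConciseString is intended to behave, assuming the stats package above is importable at its usual vendor path; the comments only indicate which settings appear in the output, not its exact formatting:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	f := stats.Features{
		Latency:            time.Millisecond, // non-zero, so it is printed
		MaxConcurrentCalls: 1,                // positions 4-6 are always printed
		ReqSizeBytes:       1,
		RespSizeBytes:      1,
		EnableChannelz:     true, // printed because noneEmptyPos[8] is set
	}
	// Kbps, Mtu, EnableTrace and EnableCompressor are left at their zero
	// values here, so ConciseString omits them.
	fmt.Println(f.ConciseString())
}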


@ -81,20 +81,20 @@ func printClientConfig(config *testpb.ClientConfig) {
// will always create sync client
// - async client threads.
// - core list
grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType)
grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads)
grpclog.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType)
grpclog.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads)
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
grpclog.Infof(" * core list: %v (ignored)", config.CoreList)
grpclog.Printf(" - security params: %v", config.SecurityParams)
grpclog.Printf(" - core limit: %v", config.CoreLimit)
grpclog.Printf(" - payload config: %v", config.PayloadConfig)
grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
grpclog.Printf(" - channel number: %v", config.ClientChannels)
grpclog.Printf(" - load params: %v", config.LoadParams)
grpclog.Printf(" - rpc type: %v", config.RpcType)
grpclog.Printf(" - histogram params: %v", config.HistogramParams)
grpclog.Printf(" - server targets: %v", config.ServerTargets)
grpclog.Infof(" - security params: %v", config.SecurityParams)
grpclog.Infof(" - core limit: %v", config.CoreLimit)
grpclog.Infof(" - payload config: %v", config.PayloadConfig)
grpclog.Infof(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
grpclog.Infof(" - channel number: %v", config.ClientChannels)
grpclog.Infof(" - load params: %v", config.LoadParams)
grpclog.Infof(" - rpc type: %v", config.RpcType)
grpclog.Infof(" - histogram params: %v", config.HistogramParams)
grpclog.Infof(" - server targets: %v", config.ServerTargets)
}
func setupClientEnv(config *testpb.ClientConfig) {
@ -118,7 +118,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error
case testpb.ClientType_SYNC_CLIENT:
case testpb.ClientType_ASYNC_CLIENT:
default:
return nil, nil, status.Errorf(codes.InvalidArgument, "unknow client type: %v", config.ClientType)
return nil, nil, status.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
}
// Check and set security options.
@ -142,13 +142,13 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error
opts = append(opts, grpc.WithDefaultCallOptions(grpc.CallCustomCodec(byteBufCodec{})))
case *testpb.PayloadConfig_SimpleParams:
default:
return nil, nil, status.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig)
return nil, nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
}
// Create connections.
connCount := int(config.ClientChannels)
conns := make([]*grpc.ClientConn, connCount, connCount)
conns := make([]*grpc.ClientConn, connCount)
for connIndex := 0; connIndex < connCount; connIndex++ {
conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
}
@ -177,7 +177,7 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc
payloadRespSize = int(c.SimpleParams.RespSize)
payloadType = "protobuf"
default:
return status.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig)
return status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
}
@ -228,7 +228,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
BaseBucketSize: (1 + config.HistogramParams.Resolution),
MinValue: 0,
},
lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)),
lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)),
stop: make(chan bool),
lastResetTime: time.Now(),
@ -340,7 +340,7 @@ func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
if reset {
// Merging histogram may take some time.
// Put all histograms aside and merge later.
toMerge := make([]*stats.Histogram, len(bc.lockingHistograms), len(bc.lockingHistograms))
toMerge := make([]*stats.Histogram, len(bc.lockingHistograms))
for i := range bc.lockingHistograms {
toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions))
}
@ -366,7 +366,7 @@ func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage)
}
b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets))
b := make([]uint32, len(mergedHistogram.Buckets))
for i, v := range mergedHistogram.Buckets {
b[i] = uint32(v.Count)
}


@ -20,6 +20,8 @@ package main
import (
"flag"
"fmt"
"net"
"runtime"
"strconv"
"strings"
@ -57,15 +59,15 @@ func printServerConfig(config *testpb.ServerConfig) {
// will always start sync server
// - async server threads
// - core list
grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType)
grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads)
grpclog.Infof(" * server type: %v (ignored, always starts sync server)", config.ServerType)
grpclog.Infof(" * async server threads: %v (ignored)", config.AsyncServerThreads)
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
grpclog.Infof(" * core list: %v (ignored)", config.CoreList)
grpclog.Printf(" - security params: %v", config.SecurityParams)
grpclog.Printf(" - core limit: %v", config.CoreLimit)
grpclog.Printf(" - port: %v", config.Port)
grpclog.Printf(" - payload config: %v", config.PayloadConfig)
grpclog.Infof(" - security params: %v", config.SecurityParams)
grpclog.Infof(" - core limit: %v", config.CoreLimit)
grpclog.Infof(" - port: %v", config.Port)
grpclog.Infof(" - payload config: %v", config.PayloadConfig)
}
func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
@ -87,7 +89,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
case testpb.ServerType_ASYNC_SERVER:
case testpb.ServerType_ASYNC_GENERIC_SERVER:
default:
return nil, status.Errorf(codes.InvalidArgument, "unknow server type: %v", config.ServerType)
return nil, status.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType)
}
// Set security options.
@ -110,41 +112,42 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
if port == 0 {
port = serverPort
}
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
grpclog.Fatalf("Failed to listen: %v", err)
}
addr := lis.Addr().String()
// Create different benchmark server according to config.
var (
addr string
closeFunc func()
err error
)
var closeFunc func()
if config.PayloadConfig != nil {
switch payload := config.PayloadConfig.Payload.(type) {
case *testpb.PayloadConfig_BytebufParams:
opts = append(opts, grpc.CustomCodec(byteBufCodec{}))
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Type: "bytebuf",
Metadata: payload.BytebufParams.RespSize,
Listener: lis,
}, opts...)
case *testpb.PayloadConfig_SimpleParams:
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
Type: "protobuf",
closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Type: "protobuf",
Listener: lis,
}, opts...)
case *testpb.PayloadConfig_ComplexParams:
return nil, status.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig)
default:
return nil, status.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig)
return nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
}
} else {
// Start protobuf server if payload config is nil.
addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Addr: ":" + strconv.Itoa(port),
Type: "protobuf",
closeFunc = benchmark.StartServer(benchmark.ServerInfo{
Type: "protobuf",
Listener: lis,
}, opts...)
}
grpclog.Printf("benchmark server listening at %v", addr)
grpclog.Infof("benchmark server listening at %v", addr)
addrSplitted := strings.Split(addr, ":")
p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1])
if err != nil {

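For context, a minimal sketch of the listener-based StartServer pattern this section switches to: the caller creates the net.Listener, passes it via ServerInfo.Listener, and already knows the address to dial. Import paths are assumed to be the same as in the files above.

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	"google.golang.org/grpc/grpclog"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	// StartServer no longer returns an address; the caller owns the listener.
	stop := benchmark.StartServer(benchmark.ServerInfo{Type: "protobuf", Listener: lis})
	defer stop()

	conn := benchmark.NewClientConn(lis.Addr().String(), grpc.WithInsecure())
	defer conn.Close()
	// ... issue RPCs against conn ...
}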

@ -79,7 +79,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
var bs *benchmarkServer
defer func() {
// Close benchmark server when stream ends.
grpclog.Printf("closing benchmark server")
grpclog.Infof("closing benchmark server")
if bs != nil {
bs.closeFunc()
}
@ -96,9 +96,9 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
var out *testpb.ServerStatus
switch argtype := in.Argtype.(type) {
case *testpb.ServerArgs_Setup:
grpclog.Printf("server setup received:")
grpclog.Infof("server setup received:")
if bs != nil {
grpclog.Printf("server setup received when server already exists, closing the existing server")
grpclog.Infof("server setup received when server already exists, closing the existing server")
bs.closeFunc()
}
bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
@ -112,8 +112,8 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
}
case *testpb.ServerArgs_Mark:
grpclog.Printf("server mark received:")
grpclog.Printf(" - %v", argtype)
grpclog.Infof("server mark received:")
grpclog.Infof(" - %v", argtype)
if bs == nil {
return status.Error(codes.InvalidArgument, "server does not exist when mark received")
}
@ -134,7 +134,7 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
var bc *benchmarkClient
defer func() {
// Shut down benchmark client when stream ends.
grpclog.Printf("shuting down benchmark client")
grpclog.Infof("shuting down benchmark client")
if bc != nil {
bc.shutdown()
}
@ -151,9 +151,9 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
var out *testpb.ClientStatus
switch t := in.Argtype.(type) {
case *testpb.ClientArgs_Setup:
grpclog.Printf("client setup received:")
grpclog.Infof("client setup received:")
if bc != nil {
grpclog.Printf("client setup received when client already exists, shuting down the existing client")
grpclog.Infof("client setup received when client already exists, shuting down the existing client")
bc.shutdown()
}
bc, err = startBenchmarkClient(t.Setup)
@ -165,8 +165,8 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
}
case *testpb.ClientArgs_Mark:
grpclog.Printf("client mark received:")
grpclog.Printf(" - %v", t)
grpclog.Infof("client mark received:")
grpclog.Infof(" - %v", t)
if bc == nil {
return status.Error(codes.InvalidArgument, "client does not exist when mark received")
}
@ -182,12 +182,12 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
}
func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) {
grpclog.Printf("core count: %v", runtime.NumCPU())
grpclog.Infof("core count: %v", runtime.NumCPU())
return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil
}
func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) {
grpclog.Printf("quiting worker")
grpclog.Infof("quitting worker")
s.stop <- true
return &testpb.Void{}, nil
}
@ -200,7 +200,7 @@ func main() {
if err != nil {
grpclog.Fatalf("failed to listen: %v", err)
}
grpclog.Printf("worker listening at port %v", *driverPort)
grpclog.Infof("worker listening at port %v", *driverPort)
s := grpc.NewServer()
stop := make(chan bool)
@ -221,8 +221,8 @@ func main() {
if *pprofPort >= 0 {
go func() {
grpclog.Println("Starting pprof server on port " + strconv.Itoa(*pprofPort))
grpclog.Println(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
grpclog.Infoln("Starting pprof server on port " + strconv.Itoa(*pprofPort))
grpclog.Infoln(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
}()
}