Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: vendor files
vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go (391 lines, generated, vendored, normal file)

@@ -0,0 +1,391 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"math"
	"runtime"
	"sync"
	"syscall"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/testdata"
)

var caFile = flag.String("ca_file", "", "The file containing the CA root cert file")

type lockingHistogram struct {
	mu        sync.Mutex
	histogram *stats.Histogram
}

func (h *lockingHistogram) add(value int64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.histogram.Add(value)
}

// swap sets h.histogram to new, and returns its old value.
func (h *lockingHistogram) swap(new *stats.Histogram) *stats.Histogram {
	h.mu.Lock()
	defer h.mu.Unlock()
	old := h.histogram
	h.histogram = new
	return old
}

func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
	h.mu.Lock()
	defer h.mu.Unlock()
	merged.Merge(h.histogram)
}

type benchmarkClient struct {
	closeConns        func()
	stop              chan bool
	lastResetTime     time.Time
	histogramOptions  stats.HistogramOptions
	lockingHistograms []lockingHistogram
	rusageLastReset   *syscall.Rusage
}

func printClientConfig(config *testpb.ClientConfig) {
	// Some config options are ignored:
	// - client type:
	//     will always create sync client
	// - async client threads.
	// - core list
	grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType)
	grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads)
	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
	grpclog.Printf(" * core list: %v (ignored)", config.CoreList)

	grpclog.Printf(" - security params: %v", config.SecurityParams)
	grpclog.Printf(" - core limit: %v", config.CoreLimit)
	grpclog.Printf(" - payload config: %v", config.PayloadConfig)
	grpclog.Printf(" - rpcs per channel: %v", config.OutstandingRpcsPerChannel)
	grpclog.Printf(" - channel number: %v", config.ClientChannels)
	grpclog.Printf(" - load params: %v", config.LoadParams)
	grpclog.Printf(" - rpc type: %v", config.RpcType)
	grpclog.Printf(" - histogram params: %v", config.HistogramParams)
	grpclog.Printf(" - server targets: %v", config.ServerTargets)
}

func setupClientEnv(config *testpb.ClientConfig) {
	// Use all cpu cores available on machine by default.
	// TODO: Revisit this for the optimal default setup.
	if config.CoreLimit > 0 {
		runtime.GOMAXPROCS(int(config.CoreLimit))
	} else {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
}

// createConns creates connections according to given config.
// It returns the connections and corresponding function to close them.
// It returns non-nil error if there is anything wrong.
func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) {
	var opts []grpc.DialOption

	// Sanity check for client type.
	switch config.ClientType {
	case testpb.ClientType_SYNC_CLIENT:
	case testpb.ClientType_ASYNC_CLIENT:
	default:
		return nil, nil, status.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
	}

	// Check and set security options.
	if config.SecurityParams != nil {
		if *caFile == "" {
			*caFile = testdata.Path("ca.pem")
		}
		creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride)
		if err != nil {
			return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	// Use byteBufCodec if it is required.
	if config.PayloadConfig != nil {
		switch config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			opts = append(opts, grpc.WithCodec(byteBufCodec{}))
		case *testpb.PayloadConfig_SimpleParams:
		default:
			return nil, nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// Create connections.
	connCount := int(config.ClientChannels)
	conns := make([]*grpc.ClientConn, connCount)
	for connIndex := 0; connIndex < connCount; connIndex++ {
		conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
	}

	return conns, func() {
		for _, conn := range conns {
			conn.Close()
		}
	}, nil
}

func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
	// Read payload size and type from config.
	var (
		payloadReqSize, payloadRespSize int
		payloadType                     string
	)
	if config.PayloadConfig != nil {
		switch c := config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			payloadReqSize = int(c.BytebufParams.ReqSize)
			payloadRespSize = int(c.BytebufParams.RespSize)
			payloadType = "bytebuf"
		case *testpb.PayloadConfig_SimpleParams:
			payloadReqSize = int(c.SimpleParams.ReqSize)
			payloadRespSize = int(c.SimpleParams.RespSize)
			payloadType = "protobuf"
		default:
			return status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// TODO add open loop distribution.
	switch config.LoadParams.Load.(type) {
	case *testpb.LoadParams_ClosedLoop:
	case *testpb.LoadParams_Poisson:
		return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
	default:
		return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)

	switch config.RpcType {
	case testpb.RpcType_UNARY:
		bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
		// TODO open loop.
	case testpb.RpcType_STREAMING:
		bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
		// TODO open loop.
	default:
		return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
	}

	return nil
}

func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
	printClientConfig(config)

	// Set running environment like how many cores to use.
	setupClientEnv(config)

	conns, closeConns, err := createConns(config)
	if err != nil {
		return nil, err
	}

	rusage := new(syscall.Rusage)
	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
	bc := &benchmarkClient{
		histogramOptions: stats.HistogramOptions{
			NumBuckets:     int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
			GrowthFactor:   config.HistogramParams.Resolution,
			BaseBucketSize: (1 + config.HistogramParams.Resolution),
			MinValue:       0,
		},
		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)),

		stop:            make(chan bool),
		lastResetTime:   time.Now(),
		closeConns:      closeConns,
		rusageLastReset: rusage,
	}

	if err = performRPCs(config, conns, bc); err != nil {
		// Close all connections if performRPCs failed.
		closeConns()
		return nil, err
	}

	return bc, nil
}

func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
	for ic, conn := range conns {
		client := testpb.NewBenchmarkServiceClient(conn)
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			// Create histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start goroutine on the created mutex and histogram.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// Now relying on worker client to reserve time to do warm up.
				// The worker client needs to wait for some time after client is created,
				// before starting benchmark.
				done := make(chan bool)
				for {
					go func() {
						start := time.Now()
						if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
							select {
							case <-bc.stop:
							case done <- false:
							}
							return
						}
						elapse := time.Since(start)
						bc.lockingHistograms[idx].add(int64(elapse))
						select {
						case <-bc.stop:
						case done <- true:
						}
					}()
					select {
					case <-bc.stop:
						return
					case <-done:
					}
				}
			}(idx)
		}
	}
}

func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
	var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error
	if payloadType == "bytebuf" {
		doRPC = benchmark.DoByteBufStreamingRoundTrip
	} else {
		doRPC = benchmark.DoStreamingRoundTrip
	}
	for ic, conn := range conns {
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			c := testpb.NewBenchmarkServiceClient(conn)
			stream, err := c.StreamingCall(context.Background())
			if err != nil {
				grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
			}
			// Create histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start goroutine on the created mutex and histogram.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// Now relying on worker client to reserve time to do warm up.
				// The worker client needs to wait for some time after client is created,
				// before starting benchmark.
				for {
					start := time.Now()
					if err := doRPC(stream, reqSize, respSize); err != nil {
						return
					}
					elapse := time.Since(start)
					bc.lockingHistograms[idx].add(int64(elapse))
					select {
					case <-bc.stop:
						return
					default:
					}
				}
			}(idx)
		}
	}
}

// getStats returns the stats for benchmark client.
// It resets lastResetTime and all histograms if argument reset is true.
func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
	var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64
	mergedHistogram := stats.NewHistogram(bc.histogramOptions)
	latestRusage := new(syscall.Rusage)

	if reset {
		// Merging histogram may take some time.
		// Put all histograms aside and merge later.
		toMerge := make([]*stats.Histogram, len(bc.lockingHistograms))
		for i := range bc.lockingHistograms {
			toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions))
		}

		for i := 0; i < len(toMerge); i++ {
			mergedHistogram.Merge(toMerge[i])
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage)
		uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage)

		bc.rusageLastReset = latestRusage
		bc.lastResetTime = time.Now()
	} else {
		// Merge only, do not reset.
		for i := range bc.lockingHistograms {
			bc.lockingHistograms[i].mergeInto(mergedHistogram)
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage)
		uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage)
	}

	b := make([]uint32, len(mergedHistogram.Buckets))
	for i, v := range mergedHistogram.Buckets {
		b[i] = uint32(v.Count)
	}
	return &testpb.ClientStats{
		Latencies: &testpb.HistogramData{
			Bucket:       b,
			MinSeen:      float64(mergedHistogram.Min),
			MaxSeen:      float64(mergedHistogram.Max),
			Sum:          float64(mergedHistogram.Sum),
			SumOfSquares: float64(mergedHistogram.SumOfSquares),
			Count:        float64(mergedHistogram.Count),
		},
		TimeElapsed: wallTimeElapsed,
		TimeUser:    uTimeElapsed,
		TimeSystem:  sTimeElapsed,
	}
}

func (bc *benchmarkClient) shutdown() {
	close(bc.stop)
	bc.closeConns()
}
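The NumBuckets expression in startBenchmarkClient follows from the histogram's exponential bucketing: with growth factor r, bucket i covers latencies up to roughly (1+r)^i, so spanning values up to MaxPossible takes about log(MaxPossible)/log(1+r) buckets, plus one for the tail. A standalone sketch of the same arithmetic (the example numbers are illustrative, not taken from the vendored code):

	package main

	import (
		"fmt"
		"math"
	)

	// numBuckets mirrors the NumBuckets computation above: enough
	// exponentially growing buckets (growth factor r) to reach maxPossible.
	func numBuckets(maxPossible, r float64) int {
		return int(math.Log(maxPossible)/math.Log(1+r)) + 1
	}

	func main() {
		// Tracking latencies up to 60s (in nanoseconds) at 1% resolution
		// needs on the order of 2500 buckets.
		fmt.Println(numBuckets(60e9, 0.01))
	}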
vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go (185 lines, generated, vendored, normal file)

@@ -0,0 +1,185 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/testdata"
)

var (
	certFile = flag.String("tls_cert_file", "", "The TLS cert file")
	keyFile  = flag.String("tls_key_file", "", "The TLS key file")
)

type benchmarkServer struct {
	port            int
	cores           int
	closeFunc       func()
	mu              sync.RWMutex
	lastResetTime   time.Time
	rusageLastReset *syscall.Rusage
}

func printServerConfig(config *testpb.ServerConfig) {
	// Some config options are ignored:
	// - server type:
	//     will always start sync server
	// - async server threads
	// - core list
	grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType)
	grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads)
	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
	grpclog.Printf(" * core list: %v (ignored)", config.CoreList)

	grpclog.Printf(" - security params: %v", config.SecurityParams)
	grpclog.Printf(" - core limit: %v", config.CoreLimit)
	grpclog.Printf(" - port: %v", config.Port)
	grpclog.Printf(" - payload config: %v", config.PayloadConfig)
}

func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
	printServerConfig(config)

	// Use all cpu cores available on machine by default.
	// TODO: Revisit this for the optimal default setup.
	numOfCores := runtime.NumCPU()
	if config.CoreLimit > 0 {
		numOfCores = int(config.CoreLimit)
	}
	runtime.GOMAXPROCS(numOfCores)

	var opts []grpc.ServerOption

	// Sanity check for server type.
	switch config.ServerType {
	case testpb.ServerType_SYNC_SERVER:
	case testpb.ServerType_ASYNC_SERVER:
	case testpb.ServerType_ASYNC_GENERIC_SERVER:
	default:
		return nil, status.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType)
	}

	// Set security options.
	if config.SecurityParams != nil {
		if *certFile == "" {
			*certFile = testdata.Path("server1.pem")
		}
		if *keyFile == "" {
			*keyFile = testdata.Path("server1.key")
		}
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			grpclog.Fatalf("failed to generate credentials %v", err)
		}
		opts = append(opts, grpc.Creds(creds))
	}

	// Priority: config.Port > serverPort > default (0).
	port := int(config.Port)
	if port == 0 {
		port = serverPort
	}

	// Create different benchmark server according to config.
	var (
		addr      string
		closeFunc func()
		err       error
	)
	if config.PayloadConfig != nil {
		switch payload := config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			opts = append(opts, grpc.CustomCodec(byteBufCodec{}))
			addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
				Addr:     ":" + strconv.Itoa(port),
				Type:     "bytebuf",
				Metadata: payload.BytebufParams.RespSize,
			}, opts...)
		case *testpb.PayloadConfig_SimpleParams:
			addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
				Addr: ":" + strconv.Itoa(port),
				Type: "protobuf",
			}, opts...)
		case *testpb.PayloadConfig_ComplexParams:
			return nil, status.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig)
		default:
			return nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	} else {
		// Start protobuf server if payload config is nil.
		addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
			Addr: ":" + strconv.Itoa(port),
			Type: "protobuf",
		}, opts...)
	}

	grpclog.Printf("benchmark server listening at %v", addr)
	addrSplit := strings.Split(addr, ":")
	p, err := strconv.Atoi(addrSplit[len(addrSplit)-1])
	if err != nil {
		grpclog.Fatalf("failed to get port number from server address: %v", err)
	}

	rusage := new(syscall.Rusage)
	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)

	return &benchmarkServer{
		port:            p,
		cores:           numOfCores,
		closeFunc:       closeFunc,
		lastResetTime:   time.Now(),
		rusageLastReset: rusage,
	}, nil
}

// getStats returns the stats for benchmark server.
// It resets lastResetTime if argument reset is true.
func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats {
	bs.mu.RLock()
	defer bs.mu.RUnlock()
	wallTimeElapsed := time.Since(bs.lastResetTime).Seconds()
	rusageLatest := new(syscall.Rusage)
	syscall.Getrusage(syscall.RUSAGE_SELF, rusageLatest)
	uTimeElapsed, sTimeElapsed := cpuTimeDiff(bs.rusageLastReset, rusageLatest)

	if reset {
		bs.lastResetTime = time.Now()
		bs.rusageLastReset = rusageLatest
	}
	return &testpb.ServerStats{
		TimeElapsed: wallTimeElapsed,
		TimeUser:    uTimeElapsed,
		TimeSystem:  sTimeElapsed,
	}
}
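startBenchmarkServer recovers the actual listen port by splitting the address on ":" and parsing the last element, which also copes with IPv6 forms such as "[::]:50051". A minimal standalone sketch of that step; net.SplitHostPort is an equivalent from the standard library, not what the vendored code calls:

	package main

	import (
		"fmt"
		"net"
		"strconv"
	)

	// portOf extracts the numeric port from a listen address; unlike a raw
	// strings.Split, net.SplitHostPort also validates the address shape.
	func portOf(addr string) (int, error) {
		_, p, err := net.SplitHostPort(addr)
		if err != nil {
			return 0, err
		}
		return strconv.Atoi(p)
	}

	func main() {
		p, err := portOf("[::]:50051")
		fmt.Println(p, err) // 50051 <nil>
	}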
vendor/google.golang.org/grpc/benchmark/worker/main.go (230 lines, generated, vendored, normal file)

@@ -0,0 +1,230 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http/pprof"
	"runtime"
	"strconv"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
)

var (
	driverPort    = flag.Int("driver_port", 10000, "port for communication with driver")
	serverPort    = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message")
	pprofPort     = flag.Int("pprof_port", -1, "port for pprof debug server to listen on; pprof server doesn't start if unset")
	blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile")
)

type byteBufCodec struct{}

func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("failed to marshal: %v is not of type *[]byte", v)
	}
	return *b, nil
}

func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("failed to unmarshal: %v is not of type *[]byte", v)
	}
	*b = data
	return nil
}

func (byteBufCodec) String() string {
	return "bytebuffer"
}

// workerServer implements WorkerService rpc handlers.
// It can create benchmarkServer or benchmarkClient on demand.
type workerServer struct {
	stop       chan<- bool
	serverPort int
}

func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error {
	var bs *benchmarkServer
	defer func() {
		// Close benchmark server when stream ends.
		grpclog.Printf("closing benchmark server")
		if bs != nil {
			bs.closeFunc()
		}
	}()
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		var out *testpb.ServerStatus
		switch argtype := in.Argtype.(type) {
		case *testpb.ServerArgs_Setup:
			grpclog.Printf("server setup received:")
			if bs != nil {
				grpclog.Printf("server setup received when server already exists, closing the existing server")
				bs.closeFunc()
			}
			bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
			if err != nil {
				return err
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(false),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}

		case *testpb.ServerArgs_Mark:
			grpclog.Printf("server mark received:")
			grpclog.Printf(" - %v", argtype)
			if bs == nil {
				return status.Error(codes.InvalidArgument, "server does not exist when mark received")
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(argtype.Mark.Reset_),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}
		}

		if err := stream.Send(out); err != nil {
			return err
		}
	}
}

func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error {
	var bc *benchmarkClient
	defer func() {
		// Shut down benchmark client when stream ends.
		grpclog.Printf("shutting down benchmark client")
		if bc != nil {
			bc.shutdown()
		}
	}()
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		var out *testpb.ClientStatus
		switch t := in.Argtype.(type) {
		case *testpb.ClientArgs_Setup:
			grpclog.Printf("client setup received:")
			if bc != nil {
				grpclog.Printf("client setup received when client already exists, shutting down the existing client")
				bc.shutdown()
			}
			bc, err = startBenchmarkClient(t.Setup)
			if err != nil {
				return err
			}
			out = &testpb.ClientStatus{
				Stats: bc.getStats(false),
			}

		case *testpb.ClientArgs_Mark:
			grpclog.Printf("client mark received:")
			grpclog.Printf(" - %v", t)
			if bc == nil {
				return status.Error(codes.InvalidArgument, "client does not exist when mark received")
			}
			out = &testpb.ClientStatus{
				Stats: bc.getStats(t.Mark.Reset_),
			}
		}

		if err := stream.Send(out); err != nil {
			return err
		}
	}
}

func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) {
	grpclog.Printf("core count: %v", runtime.NumCPU())
	return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil
}

func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) {
	grpclog.Printf("quitting worker")
	s.stop <- true
	return &testpb.Void{}, nil
}

func main() {
	grpc.EnableTracing = false

	flag.Parse()
	lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort))
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	grpclog.Printf("worker listening at port %v", *driverPort)

	s := grpc.NewServer()
	stop := make(chan bool)
	testpb.RegisterWorkerServiceServer(s, &workerServer{
		stop:       stop,
		serverPort: *serverPort,
	})

	go func() {
		<-stop
		// Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client.
		// TODO revise this once server graceful stop is supported in gRPC.
		time.Sleep(time.Second)
		s.Stop()
	}()

	runtime.SetBlockProfileRate(*blockProfRate)

	if *pprofPort >= 0 {
		go func() {
			grpclog.Println("Starting pprof server on port " + strconv.Itoa(*pprofPort))
			grpclog.Println(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
		}()
	}

	s.Serve(lis)
}
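The worker exposes WorkerService on driver_port; a benchmark driver drives runs over the RunServer/RunClient streams and can probe the machine with CoreCount. A minimal driver sketch, assuming the generated bindings in google.golang.org/grpc/benchmark/grpc_testing and the same era's grpc.Dial/WithInsecure API used above:

	package main

	import (
		"golang.org/x/net/context"
		"google.golang.org/grpc"
		testpb "google.golang.org/grpc/benchmark/grpc_testing"
		"google.golang.org/grpc/grpclog"
	)

	func main() {
		// Connect to a worker started with the default -driver_port=10000.
		conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
		if err != nil {
			grpclog.Fatalf("failed to dial worker: %v", err)
		}
		defer conn.Close()

		// Ask the worker how many cores it has available.
		client := testpb.NewWorkerServiceClient(conn)
		res, err := client.CoreCount(context.Background(), &testpb.CoreRequest{})
		if err != nil {
			grpclog.Fatalf("CoreCount failed: %v", err)
		}
		grpclog.Printf("worker reports %d cores", res.Cores)
	}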
vendor/google.golang.org/grpc/benchmark/worker/util.go (35 lines, generated, vendored, normal file)

@@ -0,0 +1,35 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import "syscall"

// cpuTimeDiff returns the user CPU time and system CPU time, in seconds,
// consumed between the first and the latest rusage snapshots.
func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float64) {
	var (
		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
	)

	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6

	return uTimeElapsed, sTimeElapsed
}
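A short usage sketch for cpuTimeDiff. measureBusyLoop is a hypothetical helper, not part of the vendored code; it assumes it is compiled into this same package (with "fmt" and "syscall" imported in its file) and, like the rest of the worker, it relies on the Unix-only syscall.Getrusage:

	// measureBusyLoop (hypothetical) snapshots rusage, burns some user CPU
	// time, and reports the user/system seconds consumed in between.
	func measureBusyLoop() {
		before := new(syscall.Rusage)
		syscall.Getrusage(syscall.RUSAGE_SELF, before)

		x := 0
		for i := 0; i < 100000000; i++ { // busy loop to accumulate user CPU time
			x += i
		}

		after := new(syscall.Rusage)
		syscall.Getrusage(syscall.RUSAGE_SELF, after)

		// uSec counts time spent in user mode, sSec time spent in the kernel.
		uSec, sSec := cpuTimeDiff(before, after)
		fmt.Printf("x=%d user=%.3fs sys=%.3fs\n", x, uSec, sSec)
	}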