Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00
Fresh dep ensure
141 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go (generated, vendored, Normal file)
@@ -0,0 +1,141 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package binarylog implements binary logging as defined in
// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
package binarylog

import (
	"fmt"
	"os"

	"google.golang.org/grpc/grpclog"
)

// Logger is the global binary logger for the binary. One of these should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
var Logger *logger

func init() {
	const envStr = "GRPC_BINARY_LOG_FILTER"
	configStr := os.Getenv(envStr)
	Logger = newLoggerFromConfigString(configStr)
}

type methodLoggerConfig struct {
	// Max length of header and message.
	hdr, msg uint64
}

type logger struct {
	all      *methodLoggerConfig
	services map[string]*methodLoggerConfig
	methods  map[string]*methodLoggerConfig

	blacklist map[string]struct{}
}

// newEmptyLogger creates an empty logger. The map fields need to be filled in
// using the set* functions.
func newEmptyLogger() *logger {
	return &logger{}
}

// Set method logger for "*".
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
	if l.all != nil {
		return fmt.Errorf("conflicting global rules found")
	}
	l.all = ml
	return nil
}

// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
	if _, ok := l.services[service]; ok {
		return fmt.Errorf("conflicting rules for service %v found", service)
	}
	if l.services == nil {
		l.services = make(map[string]*methodLoggerConfig)
	}
	l.services[service] = ml
	return nil
}

// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
	if _, ok := l.blacklist[method]; ok {
		return fmt.Errorf("conflicting rules for method %v found", method)
	}
	if _, ok := l.methods[method]; ok {
		return fmt.Errorf("conflicting rules for method %v found", method)
	}
	if l.methods == nil {
		l.methods = make(map[string]*methodLoggerConfig)
	}
	l.methods[method] = ml
	return nil
}

// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
	if _, ok := l.blacklist[method]; ok {
		return fmt.Errorf("conflicting rules for method %v found", method)
	}
	if _, ok := l.methods[method]; ok {
		return fmt.Errorf("conflicting rules for method %v found", method)
	}
	if l.blacklist == nil {
		l.blacklist = make(map[string]struct{})
	}
	l.blacklist[method] = struct{}{}
	return nil
}

// GetMethodLogger returns the methodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) GetMethodLogger(methodName string) *MethodLogger {
	s, m, err := parseMethodName(methodName)
	if err != nil {
		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
		return nil
	}
	if ml, ok := l.methods[s+"/"+m]; ok {
		return newMethodLogger(ml.hdr, ml.msg)
	}
	if _, ok := l.blacklist[s+"/"+m]; ok {
		return nil
	}
	if ml, ok := l.services[s]; ok {
		return newMethodLogger(ml.hdr, ml.msg)
	}
	if l.all == nil {
		return nil
	}
	return newMethodLogger(l.all.hdr, l.all.msg)
}
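
binarylog.go above builds the package-global Logger from the GRPC_BINARY_LOG_FILTER environment variable at init time. A minimal standalone sketch of setting that variable for a process follows; the filter value shown is a hypothetical example, not something mandated by the vendored file:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical filter: log every method with headers capped at 256 bytes and
	// messages at 512 bytes, but skip Foo/Bar entirely (the "-" blacklist form).
	// The init() in binarylog.go reads this variable and builds Logger from it.
	os.Setenv("GRPC_BINARY_LOG_FILTER", "*{h:256;m:512},-Foo/Bar")
	fmt.Println("filter:", os.Getenv("GRPC_BINARY_LOG_FILTER"))
}
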
147 vendor/google.golang.org/grpc/internal/binarylog/binarylog_test.go (generated, vendored, Normal file)
@@ -0,0 +1,147 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
	"testing"
)

// Test that GetMethodLogger returns the logger with the most exact match.
func TestGetMethodLogger(t *testing.T) {
	testCases := []struct {
		in       string
		method   string
		hdr, msg uint64
	}{
		// Global.
		{
			in:     "*{h:12;m:23}",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
		// service/*.
		{
			in:     "*,s/*{h:12;m:23}",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
		// Service/method.
		{
			in:     "*{h;m},s/m{h:12;m:23}",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
		{
			in:     "*{h;m},s/*{h:314;m},s/m{h:12;m:23}",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
		{
			in:     "*{h;m},s/*{h:12;m:23},s/m",
			method: "/s/m",
			hdr:    maxUInt, msg: maxUInt,
		},

		// service/*.
		{
			in:     "*{h;m},s/*{h:12;m:23},s/m1",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
		{
			in:     "*{h;m},s1/*,s/m{h:12;m:23}",
			method: "/s/m",
			hdr:    12, msg: 23,
		},

		// With black list.
		{
			in:     "*{h:12;m:23},-s/m1",
			method: "/s/m",
			hdr:    12, msg: 23,
		},
	}
	for _, tc := range testCases {
		l := newLoggerFromConfigString(tc.in)
		if l == nil {
			t.Errorf("in: %q, failed to create logger from config string", tc.in)
			continue
		}
		ml := l.GetMethodLogger(tc.method)
		if ml == nil {
			t.Errorf("in: %q, method logger is nil, want non-nil", tc.in)
			continue
		}

		if ml.headerMaxLen != tc.hdr || ml.messageMaxLen != tc.msg {
			t.Errorf("in: %q, want header: %v, message: %v, got header: %v, message: %v", tc.in, tc.hdr, tc.msg, ml.headerMaxLen, ml.messageMaxLen)
		}
	}
}

// Expect the returned method logger to be nil.
func TestGetMethodLoggerOff(t *testing.T) {
	testCases := []struct {
		in     string
		method string
	}{
		// method not specified.
		{
			in:     "s1/m",
			method: "/s/m",
		},
		{
			in:     "s/m1",
			method: "/s/m",
		},
		{
			in:     "s1/*",
			method: "/s/m",
		},
		{
			in:     "s1/*,s/m1",
			method: "/s/m",
		},

		// blacklisted.
		{
			in:     "*,-s/m",
			method: "/s/m",
		},
		{
			in:     "s/*,-s/m",
			method: "/s/m",
		},
		{
			in:     "-s/m,s/*",
			method: "/s/m",
		},
	}
	for _, tc := range testCases {
		l := newLoggerFromConfigString(tc.in)
		if l == nil {
			t.Errorf("in: %q, failed to create logger from config string", tc.in)
			continue
		}
		ml := l.GetMethodLogger(tc.method)
		if ml != nil {
			t.Errorf("in: %q, method logger is non-nil, want nil", tc.in)
		}
	}
}
206 vendor/google.golang.org/grpc/internal/binarylog/env_config.go (generated, vendored, Normal file)
@@ -0,0 +1,206 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"google.golang.org/grpc/grpclog"
)

// newLoggerFromConfigString reads the string and builds a logger.
//
// Example filter config strings:
//  - "" Nothing will be logged
//  - "*" All headers and messages will be fully logged.
//  - "*{h}" Only headers will be logged.
//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
//  - "Foo/*" Logs every method in service Foo
//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
//    /Foo/Bar, logs all headers and messages in every other method in service
//    Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
func newLoggerFromConfigString(s string) *logger {
	l := newEmptyLogger()
	methods := strings.Split(s, ",")
	for _, method := range methods {
		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
			grpclog.Warningf("failed to parse binary log config: %v", err)
			return nil
		}
	}
	return l
}

// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
	// "" is invalid.
	if config == "" {
		return errors.New("empty string is not a valid method binary logging config")
	}

	// "-service/method", blacklist, no * or {} allowed.
	if config[0] == '-' {
		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
		if err != nil {
			return fmt.Errorf("invalid config: %q, %v", config, err)
		}
		if m == "*" {
			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
		}
		if suffix != "" {
			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
		}
		if err := l.setBlacklist(s + "/" + m); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
		return nil
	}

	// "*{h:256;m:256}"
	if config[0] == '*' {
		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
		if err != nil {
			return fmt.Errorf("invalid config: %q, %v", config, err)
		}
		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
		return nil
	}

	s, m, suffix, err := parseMethodConfigAndSuffix(config)
	if err != nil {
		return fmt.Errorf("invalid config: %q, %v", config, err)
	}
	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
	if err != nil {
		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
	}
	if m == "*" {
		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
	} else {
		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
	}
	return nil
}

const (
	// TODO: this const is only used by env_config now. But could be useful for
	// other config. Move to binarylog.go if necessary.
	maxUInt = ^uint64(0)

	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
	// expected output.
	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`

	// For suffix from above, "{h:123,m:123}". See test for expected output.
	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
)

var (
	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
)

// Turn "service/method{h;m}" into "service", "method", "{h;m}".
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
	// Regexp result:
	//
	// in:  "p.s/m{h:123,m:123}",
	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
	match := longMethodConfigRegexp.FindStringSubmatch(c)
	if match == nil {
		return "", "", "", fmt.Errorf("%q contains invalid substring", c)
	}
	service = match[1]
	method = match[2]
	suffix = match[3]
	return
}

// Turn "{h:123;m:345}" into 123, 345.
//
// Return maxUInt if length is unspecified.
func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
	if c == "" {
		return maxUInt, maxUInt, nil
	}
	// Header config only.
	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
		if s := match[1]; s != "" {
			hdrLenStr, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
			return hdrLenStr, 0, nil
		}
		return maxUInt, 0, nil
	}

	// Message config only.
	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
		if s := match[1]; s != "" {
			msgLenStr, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
			return 0, msgLenStr, nil
		}
		return 0, maxUInt, nil
	}

	// Header and message config both.
	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
		// Both hdr and msg are specified, but one or two of them might be empty.
		hdrLenStr = maxUInt
		msgLenStr = maxUInt
		if s := match[1]; s != "" {
			hdrLenStr, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
		}
		if s := match[2]; s != "" {
			msgLenStr, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
		}
		return hdrLenStr, msgLenStr, nil
	}
	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
}
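
env_config.go above splits each comma-separated config item with longMethodConfigRegexp into service, method, and an optional "{h...;m...}" suffix. A standalone sketch of what that regexp yields, using the same pattern copied from the constant above (illustrative only):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as longMethodConfigRegexpStr in env_config.go.
	re := regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)
	// Submatches: full match, service, method, suffix.
	fmt.Println(re.FindStringSubmatch("p.s/m{h:123;m:456}"))
	// Prints: [p.s/m{h:123;m:456} p.s m {h:123;m:456}]
}
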
478 vendor/google.golang.org/grpc/internal/binarylog/env_config_test.go (generated, vendored, Normal file)
@@ -0,0 +1,478 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
	"fmt"
	"testing"
)

// This tests that when multiple configs are specified, all method loggers will
// be set correctly. Correctness of each logger is covered by other unit tests.
func TestNewLoggerFromConfigString(t *testing.T) {
	const (
		s1     = "s1"
		m1     = "m1"
		m2     = "m2"
		fullM1 = s1 + "/" + m1
		fullM2 = s1 + "/" + m2
	)
	c := fmt.Sprintf("*{h:1;m:2},%s{h},%s{m},%s{h;m}", s1+"/*", fullM1, fullM2)
	l := newLoggerFromConfigString(c)

	if l.all.hdr != 1 || l.all.msg != 2 {
		t.Errorf("l.all = %#v, want headerLen: 1, messageLen: 2", l.all)
	}

	if ml, ok := l.services[s1]; ok {
		if ml.hdr != maxUInt || ml.msg != 0 {
			t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.hdr, ml.msg)
		}
	} else {
		t.Errorf("service/* is not set")
	}

	if ml, ok := l.methods[fullM1]; ok {
		if ml.hdr != 0 || ml.msg != maxUInt {
			t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg)
		}
	} else {
		t.Errorf("service/method{h} is not set")
	}

	if ml, ok := l.methods[fullM2]; ok {
		if ml.hdr != maxUInt || ml.msg != maxUInt {
			t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg)
		}
	} else {
		t.Errorf("service/method{h;m} is not set")
	}
}

func TestNewLoggerFromConfigStringInvalid(t *testing.T) {
	testCases := []string{
		"",
		"*{}",
		"s/m,*{}",
		"s/m,s/m{a}",

		// Duplicate rules.
		"s/m,-s/m",
		"-s/m,s/m",
		"s/m,s/m",
		"s/m,s/m{h:1;m:1}",
		"s/m{h:1;m:1},s/m",
		"-s/m,-s/m",
		"s/*,s/*{h:1;m:1}",
		"*,*{h:1;m:1}",
	}
	for _, tc := range testCases {
		l := newLoggerFromConfigString(tc)
		if l != nil {
			t.Errorf("With config %q, want logger %v, got %v", tc, nil, l)
		}
	}
}

func TestParseMethodConfigAndSuffix(t *testing.T) {
	testCases := []struct {
		in, service, method, suffix string
	}{
		{
			in:      "p.s/m",
			service: "p.s", method: "m", suffix: "",
		},
		{
			in:      "p.s/m{h,m}",
			service: "p.s", method: "m", suffix: "{h,m}",
		},
		{
			in:      "p.s/*",
			service: "p.s", method: "*", suffix: "",
		},
		{
			in:      "p.s/*{h,m}",
			service: "p.s", method: "*", suffix: "{h,m}",
		},

		// invalid suffix will be detected by another function.
		{
			in:      "p.s/m{invalidsuffix}",
			service: "p.s", method: "m", suffix: "{invalidsuffix}",
		},
		{
			in:      "p.s/*{invalidsuffix}",
			service: "p.s", method: "*", suffix: "{invalidsuffix}",
		},
		{
			in:      "s/m*",
			service: "s", method: "m", suffix: "*",
		},
		{
			in:      "s/*m",
			service: "s", method: "*", suffix: "m",
		},
		{
			in:      "s/**",
			service: "s", method: "*", suffix: "*",
		},
	}
	for _, tc := range testCases {
		t.Logf("testing parseMethodConfigAndSuffix(%q)", tc.in)
		s, m, suffix, err := parseMethodConfigAndSuffix(tc.in)
		if err != nil {
			t.Errorf("returned error %v, want nil", err)
			continue
		}
		if s != tc.service {
			t.Errorf("service = %q, want %q", s, tc.service)
		}
		if m != tc.method {
			t.Errorf("method = %q, want %q", m, tc.method)
		}
		if suffix != tc.suffix {
			t.Errorf("suffix = %q, want %q", suffix, tc.suffix)
		}
	}
}

func TestParseMethodConfigAndSuffixInvalid(t *testing.T) {
	testCases := []string{
		"*/m",
		"*/m{}",
	}
	for _, tc := range testCases {
		s, m, suffix, err := parseMethodConfigAndSuffix(tc)
		if err == nil {
			t.Errorf("Parsing %q got nil error with %q, %q, %q, want non-nil error", tc, s, m, suffix)
		}
	}
}

func TestParseHeaderMessageLengthConfig(t *testing.T) {
	testCases := []struct {
		in       string
		hdr, msg uint64
	}{
		{
			in:  "",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h}",
			hdr: maxUInt, msg: 0,
		},
		{
			in:  "{h:314}",
			hdr: 314, msg: 0,
		},
		{
			in:  "{m}",
			hdr: 0, msg: maxUInt,
		},
		{
			in:  "{m:213}",
			hdr: 0, msg: 213,
		},
		{
			in:  "{h;m}",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h:314;m}",
			hdr: 314, msg: maxUInt,
		},
		{
			in:  "{h;m:213}",
			hdr: maxUInt, msg: 213,
		},
		{
			in:  "{h:314;m:213}",
			hdr: 314, msg: 213,
		},
	}
	for _, tc := range testCases {
		t.Logf("testing parseHeaderMessageLengthConfig(%q)", tc.in)
		hdr, msg, err := parseHeaderMessageLengthConfig(tc.in)
		if err != nil {
			t.Errorf("returned error %v, want nil", err)
			continue
		}
		if hdr != tc.hdr {
			t.Errorf("header length = %v, want %v", hdr, tc.hdr)
		}
		if msg != tc.msg {
			t.Errorf("message length = %v, want %v", msg, tc.msg)
		}
	}
}

func TestParseHeaderMessageLengthConfigInvalid(t *testing.T) {
	testCases := []string{
		"{}",
		"{h;a}",
		"{h;m;b}",
	}
	for _, tc := range testCases {
		_, _, err := parseHeaderMessageLengthConfig(tc)
		if err == nil {
			t.Errorf("Parsing %q got nil error, want non-nil error", tc)
		}
	}
}

func TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) {
	testCases := []string{
		"p.s/m",
		"service/method",
	}
	for _, tc := range testCases {
		c := "-" + tc
		t.Logf("testing fillMethodLoggerWithConfigString(%q)", c)
		l := newEmptyLogger()
		if err := l.fillMethodLoggerWithConfigString(c); err != nil {
			t.Errorf("returned err %v, want nil", err)
			continue
		}
		_, ok := l.blacklist[tc]
		if !ok {
			t.Errorf("blacklist[%q] is not set", tc)
		}
	}
}

func TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) {
	testCases := []struct {
		in       string
		hdr, msg uint64
	}{
		{
			in:  "",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h}",
			hdr: maxUInt, msg: 0,
		},
		{
			in:  "{h:314}",
			hdr: 314, msg: 0,
		},
		{
			in:  "{m}",
			hdr: 0, msg: maxUInt,
		},
		{
			in:  "{m:213}",
			hdr: 0, msg: 213,
		},
		{
			in:  "{h;m}",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h:314;m}",
			hdr: 314, msg: maxUInt,
		},
		{
			in:  "{h;m:213}",
			hdr: maxUInt, msg: 213,
		},
		{
			in:  "{h:314;m:213}",
			hdr: 314, msg: 213,
		},
	}
	for _, tc := range testCases {
		c := "*" + tc.in
		t.Logf("testing fillMethodLoggerWithConfigString(%q)", c)
		l := newEmptyLogger()
		if err := l.fillMethodLoggerWithConfigString(c); err != nil {
			t.Errorf("returned err %v, want nil", err)
			continue
		}
		if l.all == nil {
			t.Errorf("l.all is not set")
			continue
		}
		if hdr := l.all.hdr; hdr != tc.hdr {
			t.Errorf("header length = %v, want %v", hdr, tc.hdr)

		}
		if msg := l.all.msg; msg != tc.msg {
			t.Errorf("message length = %v, want %v", msg, tc.msg)
		}
	}
}

func TestFillMethodLoggerWithConfigStringPerService(t *testing.T) {
	testCases := []struct {
		in       string
		hdr, msg uint64
	}{
		{
			in:  "",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h}",
			hdr: maxUInt, msg: 0,
		},
		{
			in:  "{h:314}",
			hdr: 314, msg: 0,
		},
		{
			in:  "{m}",
			hdr: 0, msg: maxUInt,
		},
		{
			in:  "{m:213}",
			hdr: 0, msg: 213,
		},
		{
			in:  "{h;m}",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h:314;m}",
			hdr: 314, msg: maxUInt,
		},
		{
			in:  "{h;m:213}",
			hdr: maxUInt, msg: 213,
		},
		{
			in:  "{h:314;m:213}",
			hdr: 314, msg: 213,
		},
	}
	const serviceName = "service"
	for _, tc := range testCases {
		c := serviceName + "/*" + tc.in
		t.Logf("testing fillMethodLoggerWithConfigString(%q)", c)
		l := newEmptyLogger()
		if err := l.fillMethodLoggerWithConfigString(c); err != nil {
			t.Errorf("returned err %v, want nil", err)
			continue
		}
		ml, ok := l.services[serviceName]
		if !ok {
			t.Errorf("l.service[%q] is not set", serviceName)
			continue
		}
		if hdr := ml.hdr; hdr != tc.hdr {
			t.Errorf("header length = %v, want %v", hdr, tc.hdr)

		}
		if msg := ml.msg; msg != tc.msg {
			t.Errorf("message length = %v, want %v", msg, tc.msg)
		}
	}
}

func TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) {
	testCases := []struct {
		in       string
		hdr, msg uint64
	}{
		{
			in:  "",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h}",
			hdr: maxUInt, msg: 0,
		},
		{
			in:  "{h:314}",
			hdr: 314, msg: 0,
		},
		{
			in:  "{m}",
			hdr: 0, msg: maxUInt,
		},
		{
			in:  "{m:213}",
			hdr: 0, msg: 213,
		},
		{
			in:  "{h;m}",
			hdr: maxUInt, msg: maxUInt,
		},
		{
			in:  "{h:314;m}",
			hdr: 314, msg: maxUInt,
		},
		{
			in:  "{h;m:213}",
			hdr: maxUInt, msg: 213,
		},
		{
			in:  "{h:314;m:213}",
			hdr: 314, msg: 213,
		},
	}
	const (
		serviceName    = "service"
		methodName     = "method"
		fullMethodName = serviceName + "/" + methodName
	)
	for _, tc := range testCases {
		c := fullMethodName + tc.in
		t.Logf("testing fillMethodLoggerWithConfigString(%q)", c)
		l := newEmptyLogger()
		if err := l.fillMethodLoggerWithConfigString(c); err != nil {
			t.Errorf("returned err %v, want nil", err)
			continue
		}
		ml, ok := l.methods[fullMethodName]
		if !ok {
			t.Errorf("l.methods[%q] is not set", fullMethodName)
			continue
		}
		if hdr := ml.hdr; hdr != tc.hdr {
			t.Errorf("header length = %v, want %v", hdr, tc.hdr)

		}
		if msg := ml.msg; msg != tc.msg {
			t.Errorf("message length = %v, want %v", msg, tc.msg)
		}
	}
}

func TestFillMethodLoggerWithConfigStringInvalid(t *testing.T) {
	testCases := []string{
		"",
		"{}",
		"p.s/m{}",
		"p.s/m{a}",
		"p.s/m*",
		"p.s/**",
		"*/m",

		"-p.s/*",
		"-p.s/m{h}",
	}
	l := &logger{}
	for _, tc := range testCases {
		if err := l.fillMethodLoggerWithConfigString(tc); err == nil {
			t.Errorf("fillMethodLoggerWithConfigString(%q) returned nil error, want non-nil", tc)
		}
	}
}
426 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go (generated, vendored, Normal file)
@@ -0,0 +1,426 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
	"net"
	"strings"
	"sync/atomic"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

type callIDGenerator struct {
	id uint64
}

func (g *callIDGenerator) next() uint64 {
	id := atomic.AddUint64(&g.id, 1)
	return id
}

// reset is for testing only, and doesn't need to be thread safe.
func (g *callIDGenerator) reset() {
	g.id = 0
}

var idGen callIDGenerator

// MethodLogger is the sub-logger for each method.
type MethodLogger struct {
	headerMaxLen, messageMaxLen uint64

	callID          uint64
	idWithinCallGen *callIDGenerator

	sink Sink // TODO(blog): make this pluggable.
}

func newMethodLogger(h, m uint64) *MethodLogger {
	return &MethodLogger{
		headerMaxLen:  h,
		messageMaxLen: m,

		callID:          idGen.next(),
		idWithinCallGen: &callIDGenerator{},

		sink: defaultSink, // TODO(blog): make it pluggable.
	}
}

// Log creates a proto binary log entry, and logs it to the sink.
func (ml *MethodLogger) Log(c LogEntryConfig) {
	m := c.toProto()
	timestamp, _ := ptypes.TimestampProto(time.Now())
	m.Timestamp = timestamp
	m.CallId = ml.callID
	m.SequenceIdWithinCall = ml.idWithinCallGen.next()

	switch pay := m.Payload.(type) {
	case *pb.GrpcLogEntry_ClientHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
	case *pb.GrpcLogEntry_ServerHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
	case *pb.GrpcLogEntry_Message:
		m.PayloadTruncated = ml.truncateMessage(pay.Message)
	}

	ml.sink.Write(m)
}

func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
	if ml.headerMaxLen == maxUInt {
		return false
	}
	var (
		bytesLimit = ml.headerMaxLen
		index      int
	)
	// At the end of the loop, index will be the first entry where the total
	// size is greater than the limit:
	//
	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
	for ; index < len(mdPb.Entry); index++ {
		entry := mdPb.Entry[index]
		if entry.Key == "grpc-trace-bin" {
			// "grpc-trace-bin" is a special key. It's kept in the log entry,
			// but not counted towards the size limit.
			continue
		}
		currentEntryLen := uint64(len(entry.Value))
		if currentEntryLen > bytesLimit {
			break
		}
		bytesLimit -= currentEntryLen
	}
	truncated = index < len(mdPb.Entry)
	mdPb.Entry = mdPb.Entry[:index]
	return truncated
}

func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
	if ml.messageMaxLen == maxUInt {
		return false
	}
	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
		return false
	}
	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
	return true
}

// LogEntryConfig represents the configuration for a binary log entry.
type LogEntryConfig interface {
	toProto() *pb.GrpcLogEntry
}

// ClientHeader configures the binary log entry to be a ClientHeader entry.
type ClientHeader struct {
	OnClientSide bool
	Header       metadata.MD
	MethodName   string
	Authority    string
	Timeout      time.Duration
	// PeerAddr is required only when it's on server side.
	PeerAddr net.Addr
}

func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
	// This function doesn't need to set all the fields (e.g. seq ID). The Log
	// function will set the fields when necessary.
	clientHeader := &pb.ClientHeader{
		Metadata:   mdToMetadataProto(c.Header),
		MethodName: c.MethodName,
		Authority:  c.Authority,
	}
	if c.Timeout > 0 {
		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: clientHeader,
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
	}
	return ret
}

// ServerHeader configures the binary log entry to be a ServerHeader entry.
type ServerHeader struct {
	OnClientSide bool
	Header       metadata.MD
	// PeerAddr is required only when it's on client side.
	PeerAddr net.Addr
}

func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Payload: &pb.GrpcLogEntry_ServerHeader{
			ServerHeader: &pb.ServerHeader{
				Metadata: mdToMetadataProto(c.Header),
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
	}
	return ret
}

// ClientMessage configures the binary log entry to be a ClientMessage entry.
type ClientMessage struct {
	OnClientSide bool
	// Message should only be a proto.Message. Could add support for other
	// message types in the future.
	Message interface{}
}

func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
	var (
		data []byte
		err  error
	)
	if m, ok := c.Message.(proto.Message); ok {
		data, err = proto.Marshal(m)
		if err != nil {
			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
		}
	} else if b, ok := c.Message.([]byte); ok {
		data = b
	} else {
		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}

// ServerMessage configures the binary log entry to be a ServerMessage entry.
type ServerMessage struct {
	OnClientSide bool
	// Message should only be a proto.Message. Could add support for other
	// message types in the future.
	Message interface{}
}

func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
	var (
		data []byte
		err  error
	)
	if m, ok := c.Message.(proto.Message); ok {
		data, err = proto.Marshal(m)
		if err != nil {
			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
		}
	} else if b, ok := c.Message.([]byte); ok {
		data = b
	} else {
		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}

// ClientHalfClose configures the binary log entry to be a ClientHalfClose entry.
type ClientHalfClose struct {
	OnClientSide bool
}

func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
		Payload: nil, // No payload here.
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}

// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
type ServerTrailer struct {
	OnClientSide bool
	Trailer      metadata.MD
	// Err is the status error.
	Err error
	// PeerAddr is required only when it's on client side and the RPC is trailer
	// only.
	PeerAddr net.Addr
}

func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
	st, ok := status.FromError(c.Err)
	if !ok {
		grpclog.Info("binarylogging: error in trailer is not a status error")
	}
	var (
		detailsBytes []byte
		err          error
	)
	stProto := st.Proto()
	if stProto != nil && len(stProto.Details) != 0 {
		detailsBytes, err = proto.Marshal(stProto)
		if err != nil {
			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
		}
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Payload: &pb.GrpcLogEntry_Trailer{
			Trailer: &pb.Trailer{
				Metadata:      mdToMetadataProto(c.Trailer),
				StatusCode:    uint32(st.Code()),
				StatusMessage: st.Message(),
				StatusDetails: detailsBytes,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
	}
	return ret
}

// Cancel configures the binary log entry to be a Cancel entry.
type Cancel struct {
	OnClientSide bool
}

func (c *Cancel) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
		Payload: nil,
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}

// metadataKeyOmit returns whether the metadata entry with this key should be
// omitted.
func metadataKeyOmit(key string) bool {
	switch key {
	case "lb-token", ":path", ":authority", "content-encoding", "user-agent", "te":
		return true
	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
		return false
	}
	if strings.HasPrefix(key, "grpc-") {
		return true
	}
	return false
}

func mdToMetadataProto(md metadata.MD) *pb.Metadata {
	ret := &pb.Metadata{}
	for k, vv := range md {
		if metadataKeyOmit(k) {
			continue
		}
		for _, v := range vv {
			ret.Entry = append(ret.Entry,
				&pb.MetadataEntry{
					Key:   k,
					Value: []byte(v),
				},
			)
		}
	}
	return ret
}

func addrToProto(addr net.Addr) *pb.Address {
	ret := &pb.Address{}
	switch a := addr.(type) {
	case *net.TCPAddr:
		if a.IP.To4() != nil {
			ret.Type = pb.Address_TYPE_IPV4
		} else if a.IP.To16() != nil {
			ret.Type = pb.Address_TYPE_IPV6
		} else {
			ret.Type = pb.Address_TYPE_UNKNOWN
			// Do not set address and port fields.
			break
		}
		ret.Address = a.IP.String()
		ret.IpPort = uint32(a.Port)
	case *net.UnixAddr:
		ret.Type = pb.Address_TYPE_UNIX
		ret.Address = a.String()
	default:
		ret.Type = pb.Address_TYPE_UNKNOWN
	}
	return ret
}
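
truncateMetadata above keeps metadata entries while their value bytes fit under headerMaxLen, and "grpc-trace-bin" is kept without counting toward the limit. A standalone paraphrase of that rule using plain types (illustrative only, not the vendored implementation):

package main

import "fmt"

type entry struct {
	key string
	val []byte
}

// truncate mirrors the size rule in (*MethodLogger).truncateMetadata.
func truncate(entries []entry, limit uint64) (kept []entry, truncated bool) {
	i := 0
	for ; i < len(entries); i++ {
		if entries[i].key == "grpc-trace-bin" {
			continue // kept, but not counted toward the limit
		}
		l := uint64(len(entries[i].val))
		if l > limit {
			break
		}
		limit -= l
	}
	return entries[:i], i < len(entries)
}

func main() {
	kept, truncated := truncate([]entry{
		{"a", []byte{1}},
		{"grpc-trace-bin", []byte("trace")},
		{"b", []byte{1, 1}},
	}, 2)
	fmt.Println(len(kept), truncated) // 2 true: "b" no longer fits after "a".
}
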
542
vendor/google.golang.org/grpc/internal/binarylog/method_logger_test.go
generated
vendored
Normal file
542
vendor/google.golang.org/grpc/internal/binarylog/method_logger_test.go
generated
vendored
Normal file
@ -0,0 +1,542 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dpb "github.com/golang/protobuf/ptypes/duration"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func TestLog(t *testing.T) {
|
||||
idGen.reset()
|
||||
ml := newMethodLogger(10, 10)
|
||||
// Set sink to testing buffer.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
ml.sink = NewWriterSink(buf)
|
||||
|
||||
addr := "1.2.3.4"
|
||||
port := 790
|
||||
tcpAddr, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("%v:%d", addr, port))
|
||||
addr6 := "2001:1db8:85a3::8a2e:1370:7334"
|
||||
port6 := 796
|
||||
tcpAddr6, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("[%v]:%d", addr6, port6))
|
||||
|
||||
testProtoMsg := &pb.Message{
|
||||
Length: 1,
|
||||
Data: []byte{'a'},
|
||||
}
|
||||
testProtoBytes, _ := proto.Marshal(testProtoMsg)
|
||||
|
||||
testCases := []struct {
|
||||
config LogEntryConfig
|
||||
want *pb.GrpcLogEntry
|
||||
}{
|
||||
{
|
||||
config: &ClientHeader{
|
||||
OnClientSide: false,
|
||||
Header: map[string][]string{
|
||||
"a": {"b", "bb"},
|
||||
},
|
||||
MethodName: "testservice/testmethod",
|
||||
Authority: "test.service.io",
|
||||
Timeout: 2*time.Second + 3*time.Nanosecond,
|
||||
PeerAddr: tcpAddr,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_SERVER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: &pb.ClientHeader{
|
||||
Metadata: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "a", Value: []byte{'b'}},
|
||||
{Key: "a", Value: []byte{'b', 'b'}},
|
||||
},
|
||||
},
|
||||
MethodName: "testservice/testmethod",
|
||||
Authority: "test.service.io",
|
||||
Timeout: &dpb.Duration{
|
||||
Seconds: 2,
|
||||
Nanos: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: &pb.Address{
|
||||
Type: pb.Address_TYPE_IPV4,
|
||||
Address: addr,
|
||||
IpPort: uint32(port),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ClientHeader{
|
||||
OnClientSide: false,
|
||||
MethodName: "testservice/testmethod",
|
||||
Authority: "test.service.io",
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_SERVER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: &pb.ClientHeader{
|
||||
Metadata: &pb.Metadata{},
|
||||
MethodName: "testservice/testmethod",
|
||||
Authority: "test.service.io",
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ServerHeader{
|
||||
OnClientSide: true,
|
||||
Header: map[string][]string{
|
||||
"a": {"b", "bb"},
|
||||
},
|
||||
PeerAddr: tcpAddr6,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &pb.ServerHeader{
|
||||
Metadata: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "a", Value: []byte{'b'}},
|
||||
{Key: "a", Value: []byte{'b', 'b'}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: &pb.Address{
|
||||
Type: pb.Address_TYPE_IPV6,
|
||||
Address: addr6,
|
||||
IpPort: uint32(port6),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ClientMessage{
|
||||
OnClientSide: true,
|
||||
Message: testProtoMsg,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
Length: uint32(len(testProtoBytes)),
|
||||
Data: testProtoBytes,
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ServerMessage{
|
||||
OnClientSide: false,
|
||||
Message: testProtoMsg,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_SERVER,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
Length: uint32(len(testProtoBytes)),
|
||||
Data: testProtoBytes,
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ClientHalfClose{
|
||||
OnClientSide: false,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_SERVER,
|
||||
Payload: nil,
|
||||
PayloadTruncated: false,
|
||||
Peer: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ServerTrailer{
|
||||
OnClientSide: true,
|
||||
Err: status.Errorf(codes.Unavailable, "test"),
|
||||
PeerAddr: tcpAddr,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: &pb.GrpcLogEntry_Trailer{
|
||||
Trailer: &pb.Trailer{
|
||||
Metadata: &pb.Metadata{},
|
||||
StatusCode: uint32(codes.Unavailable),
|
||||
StatusMessage: "test",
|
||||
StatusDetails: nil,
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: &pb.Address{
|
||||
Type: pb.Address_TYPE_IPV4,
|
||||
Address: addr,
|
||||
IpPort: uint32(port),
|
||||
},
|
||||
},
|
||||
},
|
||||
{ // Err is nil, Log OK status.
|
||||
config: &ServerTrailer{
|
||||
OnClientSide: true,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: &pb.GrpcLogEntry_Trailer{
|
||||
Trailer: &pb.Trailer{
|
||||
Metadata: &pb.Metadata{},
|
||||
StatusCode: uint32(codes.OK),
|
||||
StatusMessage: "",
|
||||
StatusDetails: nil,
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
Peer: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &Cancel{
|
||||
OnClientSide: true,
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: nil,
|
||||
PayloadTruncated: false,
|
||||
Peer: nil,
|
||||
},
|
||||
},
|
||||
|
||||
// gRPC headers should be omitted.
|
||||
{
|
||||
config: &ClientHeader{
|
||||
OnClientSide: false,
|
||||
Header: map[string][]string{
|
||||
"grpc-reserved": {"to be omitted"},
|
||||
":authority": {"to be omitted"},
|
||||
"a": {"b", "bb"},
|
||||
},
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_SERVER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: &pb.ClientHeader{
|
||||
Metadata: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "a", Value: []byte{'b'}},
|
||||
{Key: "a", Value: []byte{'b', 'b'}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
config: &ServerHeader{
|
||||
OnClientSide: true,
|
||||
Header: map[string][]string{
|
||||
"grpc-reserved": {"to be omitted"},
|
||||
":authority": {"to be omitted"},
|
||||
"a": {"b", "bb"},
|
||||
},
|
||||
},
|
||||
want: &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
CallId: 1,
|
||||
SequenceIdWithinCall: 0,
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
|
||||
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &pb.ServerHeader{
|
||||
Metadata: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "a", Value: []byte{'b'}},
|
||||
{Key: "a", Value: []byte{'b', 'b'}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PayloadTruncated: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
buf.Reset()
|
||||
tc.want.SequenceIdWithinCall = uint64(i + 1)
|
||||
ml.Log(tc.config)
|
||||
inSink := new(pb.GrpcLogEntry)
|
||||
if err := proto.Unmarshal(buf.Bytes(), inSink); err != nil {
|
||||
t.Errorf("failed to unmarshal bytes in sink to proto: %v", err)
|
||||
continue
|
||||
}
|
||||
inSink.Timestamp = nil // Strip timestamp before comparing.
|
||||
if !proto.Equal(inSink, tc.want) {
|
||||
t.Errorf("Log(%+v), in sink: %+v, want %+v", tc.config, inSink, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateMetadataNotTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
mpPb *pb.Metadata
|
||||
}{
|
||||
{
|
||||
ml: newMethodLogger(maxUInt, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(1, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1, 1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
{Key: "", Value: []byte{1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
// "grpc-trace-bin" is kept in log but not counted towards the size
|
||||
// limit.
|
||||
{
|
||||
ml: newMethodLogger(1, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
{Key: "grpc-trace-bin", Value: []byte("some.trace.key")},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
truncated := tc.ml.truncateMetadata(tc.mpPb)
|
||||
if truncated {
|
||||
t.Errorf("test case %v, returned truncated, want not truncated", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateMetadataTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
mpPb *pb.Metadata
|
||||
|
||||
entryLen int
|
||||
}{
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1, 1, 1}},
|
||||
},
|
||||
},
|
||||
entryLen: 0,
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
{Key: "", Value: []byte{1}},
|
||||
{Key: "", Value: []byte{1}},
|
||||
},
|
||||
},
|
||||
entryLen: 2,
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1, 1}},
|
||||
{Key: "", Value: []byte{1}},
|
||||
},
|
||||
},
|
||||
entryLen: 1,
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(2, maxUInt),
|
||||
mpPb: &pb.Metadata{
|
||||
Entry: []*pb.MetadataEntry{
|
||||
{Key: "", Value: []byte{1}},
|
||||
{Key: "", Value: []byte{1, 1}},
|
||||
},
|
||||
},
|
||||
entryLen: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
truncated := tc.ml.truncateMetadata(tc.mpPb)
|
||||
if !truncated {
|
||||
t.Errorf("test case %v, returned not truncated, want truncated", i)
|
||||
continue
|
||||
}
|
||||
if len(tc.mpPb.Entry) != tc.entryLen {
|
||||
t.Errorf("test case %v, entry length: %v, want: %v", i, len(tc.mpPb.Entry), tc.entryLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateMessageNotTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
msgPb *pb.Message
|
||||
}{
|
||||
{
|
||||
ml: newMethodLogger(maxUInt, maxUInt),
|
||||
msgPb: &pb.Message{
|
||||
Data: []byte{1},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(maxUInt, 3),
|
||||
msgPb: &pb.Message{
|
||||
Data: []byte{1, 1},
|
||||
},
|
||||
},
|
||||
{
|
||||
ml: newMethodLogger(maxUInt, 2),
|
||||
msgPb: &pb.Message{
|
||||
Data: []byte{1, 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
truncated := tc.ml.truncateMessage(tc.msgPb)
|
||||
if truncated {
|
||||
t.Errorf("test case %v, returned truncated, want not truncated", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateMessageTruncated(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ml *MethodLogger
|
||||
msgPb *pb.Message
|
||||
|
||||
oldLength uint32
|
||||
}{
|
||||
{
|
||||
ml: newMethodLogger(maxUInt, 2),
|
||||
msgPb: &pb.Message{
|
||||
Length: 3,
|
||||
Data: []byte{1, 1, 1},
|
||||
},
|
||||
oldLength: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
truncated := tc.ml.truncateMessage(tc.msgPb)
|
||||
if !truncated {
|
||||
t.Errorf("test case %v, returned not truncated, want truncated", i)
|
||||
continue
|
||||
}
|
||||
if len(tc.msgPb.Data) != int(tc.ml.messageMaxLen) {
|
||||
t.Errorf("test case %v, message length: %v, want: %v", i, len(tc.msgPb.Data), tc.ml.messageMaxLen)
|
||||
}
|
||||
if tc.msgPb.Length != tc.oldLength {
|
||||
t.Errorf("test case %v, message.Length field: %v, want: %v", i, tc.msgPb.Length, tc.oldLength)
|
||||
}
|
||||
}
|
||||
}
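// Message truncation, by contrast, only caps the payload: Data is cut to the
// configured message limit while the Length field keeps the original wire size.
// A self-contained sketch (types invented for illustration, not the vendored
// pb.Message):

type loggedMessage struct {
	Length uint32 // original wire length, left untouched by truncation
	Data   []byte // payload, possibly cut short
}

func truncateMsgSketch(m *loggedMessage, maxLen uint64) (truncated bool) {
	if uint64(len(m.Data)) <= maxLen {
		return false
	}
	m.Data = m.Data[:maxLen]
	return true
}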
|
33
vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
generated
vendored
Executable file
33
vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2018 gRPC authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -eux -o pipefail
|
||||
|
||||
TMP=$(mktemp -d)
|
||||
|
||||
function finish {
|
||||
rm -rf "$TMP"
|
||||
}
|
||||
trap finish EXIT
|
||||
|
||||
pushd "$TMP"
|
||||
mkdir -p grpc/binarylog/grpc_binarylog_v1
|
||||
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
|
||||
|
||||
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
|
||||
popd
|
||||
rm -f ./grpc_binarylog_v1/*.pb.go
|
||||
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
|
||||
|
179
vendor/google.golang.org/grpc/internal/binarylog/regexp_test.go
generated
vendored
Normal file
179
vendor/google.golang.org/grpc/internal/binarylog/regexp_test.go
generated
vendored
Normal file
@ -0,0 +1,179 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLongMethodConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
}{
|
||||
{in: "", out: nil},
|
||||
{in: "*/m", out: nil},
|
||||
|
||||
{
|
||||
in: "p.s/m{}",
|
||||
out: []string{"p.s/m{}", "p.s", "m", "{}"},
|
||||
},
|
||||
|
||||
{
|
||||
in: "p.s/m",
|
||||
out: []string{"p.s/m", "p.s", "m", ""},
|
||||
},
|
||||
{
|
||||
in: "p.s/m{h}",
|
||||
out: []string{"p.s/m{h}", "p.s", "m", "{h}"},
|
||||
},
|
||||
{
|
||||
in: "p.s/m{m}",
|
||||
out: []string{"p.s/m{m}", "p.s", "m", "{m}"},
|
||||
},
|
||||
{
|
||||
in: "p.s/m{h:123}",
|
||||
out: []string{"p.s/m{h:123}", "p.s", "m", "{h:123}"},
|
||||
},
|
||||
{
|
||||
in: "p.s/m{m:123}",
|
||||
out: []string{"p.s/m{m:123}", "p.s", "m", "{m:123}"},
|
||||
},
|
||||
{
|
||||
in: "p.s/m{h:123,m:123}",
|
||||
out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
|
||||
},
|
||||
|
||||
{
|
||||
in: "p.s/*",
|
||||
out: []string{"p.s/*", "p.s", "*", ""},
|
||||
},
|
||||
{
|
||||
in: "p.s/*{h}",
|
||||
out: []string{"p.s/*{h}", "p.s", "*", "{h}"},
|
||||
},
|
||||
|
||||
{
|
||||
in: "s/m*",
|
||||
out: []string{"s/m*", "s", "m", "*"},
|
||||
},
|
||||
{
|
||||
in: "s/**",
|
||||
out: []string{"s/**", "s", "*", "*"},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
match := longMethodConfigRegexp.FindStringSubmatch(tc.in)
|
||||
if !reflect.DeepEqual(match, tc.out) {
|
||||
t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
}{
|
||||
{in: "{}", out: nil},
|
||||
{in: "{a:b}", out: nil},
|
||||
{in: "{m:123}", out: nil},
|
||||
{in: "{h:123;m:123}", out: nil},
|
||||
|
||||
{
|
||||
in: "{h}",
|
||||
out: []string{"{h}", ""},
|
||||
},
|
||||
{
|
||||
in: "{h:123}",
|
||||
out: []string{"{h:123}", "123"},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
match := headerConfigRegexp.FindStringSubmatch(tc.in)
|
||||
if !reflect.DeepEqual(match, tc.out) {
|
||||
t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
}{
|
||||
{in: "{}", out: nil},
|
||||
{in: "{a:b}", out: nil},
|
||||
{in: "{h:123}", out: nil},
|
||||
{in: "{h:123;m:123}", out: nil},
|
||||
|
||||
{
|
||||
in: "{m}",
|
||||
out: []string{"{m}", ""},
|
||||
},
|
||||
{
|
||||
in: "{m:123}",
|
||||
out: []string{"{m:123}", "123"},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
match := messageConfigRegexp.FindStringSubmatch(tc.in)
|
||||
if !reflect.DeepEqual(match, tc.out) {
|
||||
t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderMessageConfigRegexp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in string
|
||||
out []string
|
||||
}{
|
||||
{in: "{}", out: nil},
|
||||
{in: "{a:b}", out: nil},
|
||||
{in: "{h}", out: nil},
|
||||
{in: "{h:123}", out: nil},
|
||||
{in: "{m}", out: nil},
|
||||
{in: "{m:123}", out: nil},
|
||||
|
||||
{
|
||||
in: "{h;m}",
|
||||
out: []string{"{h;m}", "", ""},
|
||||
},
|
||||
{
|
||||
in: "{h:123;m}",
|
||||
out: []string{"{h:123;m}", "123", ""},
|
||||
},
|
||||
{
|
||||
in: "{h;m:123}",
|
||||
out: []string{"{h;m:123}", "", "123"},
|
||||
},
|
||||
{
|
||||
in: "{h:123;m:123}",
|
||||
out: []string{"{h:123;m:123}", "123", "123"},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
match := headerMessageConfigRegexp.FindStringSubmatch(tc.in)
|
||||
if !reflect.DeepEqual(match, tc.out) {
|
||||
t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out)
|
||||
}
|
||||
}
|
||||
}
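// The regexps exercised above live elsewhere in this package; as a rough
// illustration of the shape these tests pin down, a standalone program with an
// approximated pattern (written for this sketch, not the vendored definition)
// could extract the header and message limits like so:

package main

import (
	"fmt"
	"regexp"
)

// headerMessageRe approximates headerMessageConfigRegexp: "{h[:N];m[:N]}",
// where a missing ":N" means "no limit".
var headerMessageRe = regexp.MustCompile(`^\{h(?::(\d+))?;m(?::(\d+))?\}$`)

func main() {
	// Prints: [{h:123;m:456} 123 456]
	fmt.Println(headerMessageRe.FindStringSubmatch("{h:123;m:456}"))
	// Prints: [{h;m}  ] -- empty groups mean unlimited header/message lengths.
	fmt.Println(headerMessageRe.FindStringSubmatch("{h;m}"))
}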
|
64
vendor/google.golang.org/grpc/internal/binarylog/sink.go
generated
vendored
Normal file
64
vendor/google.golang.org/grpc/internal/binarylog/sink.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||
)
|
||||
|
||||
// SetDefaultSink sets the sink to which binary logs will be written.
|
||||
//
|
||||
// Not thread safe. Only set during initialization.
|
||||
func SetDefaultSink(s Sink) {
|
||||
defaultSink = s
|
||||
}
|
||||
|
||||
// Sink writes log entries into the binary log sink.
|
||||
type Sink interface {
|
||||
Write(*pb.GrpcLogEntry)
|
||||
}
|
||||
|
||||
type noopSink struct{}
|
||||
|
||||
func (ns *noopSink) Write(*pb.GrpcLogEntry) {}
|
||||
|
||||
// NewWriterSink creates a binary log sink with the given writer.
|
||||
func NewWriterSink(w io.Writer) Sink {
|
||||
return &writerSink{out: w}
|
||||
}
|
||||
|
||||
type writerSink struct {
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (fs *writerSink) Write(e *pb.GrpcLogEntry) {
|
||||
b, err := proto.Marshal(e)
|
||||
if err != nil {
|
||||
grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
fs.out.Write(b)
|
||||
}
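// A minimal usage sketch for the sink API above. binarylog is an internal
// package, so the sketch assumes it sits inside the grpc module (for example
// as an extra file in this package): entries written through the sink end up
// as marshaled protos in the buffer.

package binarylog

import (
	"bytes"

	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func exampleBufferSink() []byte {
	var buf bytes.Buffer
	s := NewWriterSink(&buf)
	s.Write(&pb.GrpcLogEntry{}) // marshaled entry bytes are appended to buf
	SetDefaultSink(s)           // not thread safe; intended for init time only
	return buf.Bytes()
}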
|
41
vendor/google.golang.org/grpc/internal/binarylog/util.go
generated
vendored
Normal file
41
vendor/google.golang.org/grpc/internal/binarylog/util.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// parseMethodName splits service and method from the input. It expects format
|
||||
// "/service/method".
|
||||
//
|
||||
// TODO: move to internal/grpcutil.
|
||||
func parseMethodName(methodName string) (service, method string, _ error) {
|
||||
if !strings.HasPrefix(methodName, "/") {
|
||||
return "", "", errors.New("invalid method name: should start with /")
|
||||
}
|
||||
methodName = methodName[1:]
|
||||
|
||||
pos := strings.LastIndex(methodName, "/")
|
||||
if pos < 0 {
|
||||
return "", "", errors.New("invalid method name: suffix /method is missing")
|
||||
}
|
||||
return methodName[:pos], methodName[pos+1:], nil
|
||||
}
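// A quick illustration of the helper above (assumed to sit in this package,
// e.g. in a test file): the leading "/" is required and the split happens at
// the last "/", so dotted package names stay part of the service.

func exampleParseMethodName() {
	s, m, err := parseMethodName("/grpc.testing.TestService/UnaryCall")
	// s == "grpc.testing.TestService", m == "UnaryCall", err == nil
	_, _, _ = s, m, err
}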
|
59
vendor/google.golang.org/grpc/internal/binarylog/util_test.go
generated
vendored
Normal file
59
vendor/google.golang.org/grpc/internal/binarylog/util_test.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseMethodName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
methodName string
|
||||
service, method string
|
||||
}{
|
||||
{methodName: "/s/m", service: "s", method: "m"},
|
||||
{methodName: "/p.s/m", service: "p.s", method: "m"},
|
||||
{methodName: "/p/s/m", service: "p/s", method: "m"},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
s, m, err := parseMethodName(tc.methodName)
|
||||
if err != nil {
|
||||
t.Errorf("Parsing %q got error %v, want nil", tc.methodName, err)
|
||||
continue
|
||||
}
|
||||
if s != tc.service || m != tc.method {
|
||||
t.Errorf("Parseing %q got service %q, method %q, want service %q, method %q",
|
||||
tc.methodName, s, m, tc.service, tc.method,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMethodNameInvalid(t *testing.T) {
|
||||
testCases := []string{
|
||||
"/",
|
||||
"/sm",
|
||||
"",
|
||||
"sm",
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
_, _, err := parseMethodName(tc)
|
||||
if err == nil {
|
||||
t.Errorf("Parsing %q got nil error, want non-nil error", tc)
|
||||
}
|
||||
}
|
||||
}
|
103
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
103
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
@ -27,16 +27,22 @@ import (
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxTraceEntry int32 = 30
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = 50
|
||||
curState int32
|
||||
EntryPerPage = 50
|
||||
curState int32
|
||||
maxTraceEntry = defaultMaxTraceEntry
|
||||
)
|
||||
|
||||
// TurnOn turns on channelz data collection.
|
||||
@ -52,6 +58,22 @@ func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
}
|
||||
|
||||
// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
|
||||
// Setting it to 0 will disable channel tracing.
|
||||
func SetMaxTraceEntry(i int32) {
|
||||
atomic.StoreInt32(&maxTraceEntry, i)
|
||||
}
|
||||
|
||||
// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
|
||||
func ResetMaxTraceEntryToDefault() {
|
||||
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||
}
|
||||
|
||||
func getMaxTraceEntry() int {
|
||||
i := atomic.LoadInt32(&maxTraceEntry)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to internal channelz data storage, and
|
||||
// provide synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
@ -146,6 +168,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
if pid == 0 {
|
||||
db.get().addChannel(id, cn, true, pid, ref)
|
||||
@ -170,6 +193,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid, ref)
|
||||
return id
|
||||
@ -226,6 +250,24 @@ func RemoveEntry(id int64) {
|
||||
db.get().removeEntry(id)
|
||||
}
|
||||
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
|
||||
// to the channel trace.
|
||||
// The Parent field is optional. It is used for events that will also be recorded in the entity's parent
|
||||
// trace.
|
||||
type TraceEventDesc struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEventDesc
|
||||
}
|
||||
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
|
||||
func AddTraceEvent(id int64, desc *TraceEventDesc) {
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
db.get().traceEvent(id, desc)
|
||||
}
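// A hedged usage sketch for the tracing surface above (the id is a
// placeholder; real ids come from RegisterChannel/RegisterSubChannel).
// Supplying Desc.Parent records a mirror event in the parent's trace, and
// setting the max trace entry count to 0 turns AddTraceEvent into a no-op.

func exampleTraceEvent(subChanID int64) {
	AddTraceEvent(subChanID, &TraceEventDesc{
		Desc:     "Subchannel picked a new address",
		Severity: CtINFO,
		Parent: &TraceEventDesc{ // also recorded in the parent channel's trace
			Desc:     "Child subchannel updated",
			Severity: CtINFO,
		},
	})

	SetMaxTraceEntry(0) // disables tracing; later events are dropped
	defer ResetMaxTraceEntryToDefault()
}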
|
||||
|
||||
// channelMap is the storage data structure for channelz.
|
||||
// Methods of channelMap can be divided into two categories with respect to locking.
|
||||
// 1. Methods acquire the global lock.
|
||||
@ -251,6 +293,7 @@ func (c *channelMap) addServer(id int64, s *server) {
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
cn.trace.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
@ -263,6 +306,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
sc.trace.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
@ -284,16 +328,25 @@ func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not indeed delete the
|
||||
// entry, if it has to wait on the deletion of its children, or may lead to a chain
|
||||
// of entry deletion. For example, deleting the last socket of a gracefully shutting
|
||||
// down server will lead to the server being also deleted.
|
||||
// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
|
||||
// wait on the deletion of its children and until no other entity's channel trace references it.
|
||||
// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
|
||||
// shutting down server will lead to the server being also deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||
e := c.findEntry(id)
|
||||
if v, ok := e.(tracedChannel); ok {
|
||||
v.decrTraceRefCount()
|
||||
e.deleteSelfIfReady()
|
||||
}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
@ -347,6 +400,39 @@ func (c *channelMap) deleteEntry(id int64) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
|
||||
c.mu.Lock()
|
||||
child := c.findEntry(id)
|
||||
childTC, ok := child.(tracedChannel)
|
||||
if !ok {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||
if desc.Parent != nil {
|
||||
parent := c.findEntry(child.getParentID())
|
||||
var chanType RefChannelType
|
||||
switch child.(type) {
|
||||
case *channel:
|
||||
chanType = RefChannel
|
||||
case *subChannel:
|
||||
chanType = RefSubChannel
|
||||
}
|
||||
if parentTC, ok := parent.(tracedChannel); ok {
|
||||
parentTC.getChannelTrace().append(&TraceEvent{
|
||||
Desc: desc.Parent.Desc,
|
||||
Severity: desc.Parent.Severity,
|
||||
Timestamp: time.Now(),
|
||||
RefID: id,
|
||||
RefName: childTC.getRefName(),
|
||||
RefType: chanType,
|
||||
})
|
||||
childTC.incrTraceRefCount()
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
@ -408,6 +494,7 @@ func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
t[i].Trace = cn.trace.dumpData()
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
@ -470,7 +557,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric,
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort((int64Slice(ids)))
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
@ -518,6 +605,7 @@ func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm.ChannelData = cn.c.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
cm.Trace = cn.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
@ -536,6 +624,7 @@ func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm.ChannelData = sc.c.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
cm.Trace = sc.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
|
318
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
318
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
@ -20,9 +20,12 @@ package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
@ -39,6 +42,8 @@ type entry interface {
|
||||
// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
|
||||
// list is now empty. If both conditions are met, then delete self from database.
|
||||
deleteSelfIfReady()
|
||||
// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
|
||||
getParentID() int64
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
@ -72,6 +77,10 @@ func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
func (*dummyEntry) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
@ -94,6 +103,8 @@ type ChannelMetric struct {
|
||||
// Note current grpc implementation doesn't allow channel having sockets directly,
|
||||
// therefore, this is field is unused.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
@ -120,6 +131,8 @@ type SubChannelMetric struct {
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
@ -137,7 +150,35 @@ type ChannelInternalMetric struct {
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// ChannelTrace stores traced events on a channel/subchannel and related info.
|
||||
type ChannelTrace struct {
|
||||
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
|
||||
EventNum int64
|
||||
// CreationTime is the creation time of the trace.
|
||||
CreationTime time.Time
|
||||
// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
|
||||
// oldest one)
|
||||
Events []*TraceEvent
|
||||
}
|
||||
|
||||
// TraceEvent represents a single trace event.
|
||||
type TraceEvent struct {
|
||||
// Desc is a simple description of the trace event.
|
||||
Desc string
|
||||
// Severity states the severity of this trace event.
|
||||
Severity Severity
|
||||
// Timestamp is the event time.
|
||||
Timestamp time.Time
|
||||
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||
// involved in this event.
|
||||
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||
RefID int64
|
||||
// RefName is the reference name for the entity that gets referenced in the event.
|
||||
RefName string
|
||||
// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
|
||||
RefType RefChannelType
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
@ -146,6 +187,12 @@ type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
|
||||
|
||||
type dummyChannel struct{}
|
||||
|
||||
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
|
||||
return &ChannelInternalMetric{}
|
||||
}
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
@ -155,6 +202,10 @@ type channel struct {
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
// traceRefCount is the number of trace events that reference this channel.
|
||||
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
@ -179,25 +230,96 @@ func (c *channel) triggerDelete() {
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
func (c *channel) getParentID() int64 {
|
||||
return c.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
|
||||
// deleting the channel reference from its parent's child list.
|
||||
//
|
||||
// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
|
||||
// corresponding grpc object has been invoked, and the channel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (c *channel) deleteSelfFromTree() (deleted bool) {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return
|
||||
return false
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
|
||||
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
|
||||
// channel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
if c.getTraceRefCount() != 0 {
|
||||
c.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !c.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
c.trace.clear()
|
||||
}
|
||||
|
||||
func (c *channel) getChannelTrace() *channelTrace {
|
||||
return c.trace
|
||||
}
|
||||
|
||||
func (c *channel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (c *channel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (c *channel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&c.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (c *channel) getRefName() string {
|
||||
return c.refName
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
@ -218,12 +340,82 @@ func (sc *subChannel) triggerDelete() {
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
func (sc *subChannel) getParentID() int64 {
|
||||
return sc.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
|
||||
// means deleting the subchannel reference from its parent's child list.
|
||||
//
|
||||
// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
|
||||
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
|
||||
func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return false
|
||||
}
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
|
||||
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
|
||||
// the subchannel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
|
||||
func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
if sc.getTraceRefCount() != 0 {
|
||||
// free the grpc struct (i.e. addrConn)
|
||||
sc.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !sc.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
sc.trace.clear()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getChannelTrace() *channelTrace {
|
||||
return sc.trace
|
||||
}
|
||||
|
||||
func (sc *subChannel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&sc.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getRefName() string {
|
||||
return sc.refName
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
@ -281,9 +473,9 @@ type SocketInternalMetric struct {
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
//TODO: socket options
|
||||
//TODO: Security
|
||||
RemoteName string
|
||||
SocketOptions *SocketOptionData
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
@ -317,6 +509,10 @@ func (ls *listenSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
return ls.pid
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
@ -342,6 +538,10 @@ func (ns *normalSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
return ns.pid
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
@ -369,7 +569,6 @@ type ServerInternalMetric struct {
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
@ -416,3 +615,88 @@ func (s *server) deleteSelfIfReady() {
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
||||
|
||||
func (s *server) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type tracedChannel interface {
|
||||
getChannelTrace() *channelTrace
|
||||
incrTraceRefCount()
|
||||
decrTraceRefCount()
|
||||
getRefName() string
|
||||
}
|
||||
|
||||
type channelTrace struct {
|
||||
cm *channelMap
|
||||
createdTime time.Time
|
||||
eventCount int64
|
||||
mu sync.Mutex
|
||||
events []*TraceEvent
|
||||
}
|
||||
|
||||
func (c *channelTrace) append(e *TraceEvent) {
|
||||
c.mu.Lock()
|
||||
if len(c.events) == getMaxTraceEntry() {
|
||||
del := c.events[0]
|
||||
c.events = c.events[1:]
|
||||
if del.RefID != 0 {
|
||||
// start recursive cleanup in a goroutine to not block the call originated from grpc.
|
||||
go func() {
|
||||
// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
|
||||
c.cm.mu.Lock()
|
||||
c.cm.decrTraceRefCount(del.RefID)
|
||||
c.cm.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
e.Timestamp = time.Now()
|
||||
c.events = append(c.events, e)
|
||||
c.eventCount++
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelTrace) clear() {
|
||||
c.mu.Lock()
|
||||
for _, e := range c.events {
|
||||
if e.RefID != 0 {
|
||||
// caller should have already held the c.cm.mu lock.
|
||||
c.cm.decrTraceRefCount(e.RefID)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Severity is the severity level of a trace event.
|
||||
// The canonical enumeration of all valid values is here:
|
||||
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUNKNOWN indicates unknown severity of a trace event.
|
||||
CtUNKNOWN Severity = iota
|
||||
// CtINFO indicates info level severity of a trace event.
|
||||
CtINFO
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
CtError
|
||||
)
|
||||
|
||||
// RefChannelType is the type of the entity being referenced in a trace event.
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel RefChannelType = iota
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
)
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
ct.Events = c.events[:len(c.events)]
|
||||
c.mu.Unlock()
|
||||
return ct
|
||||
}
|
||||
|
53
vendor/google.golang.org/grpc/internal/channelz/types_linux.go
generated
vendored
Normal file
53
vendor/google.golang.org/grpc/internal/channelz/types_linux.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// +build !appengine,go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
type SocketOptionData struct {
|
||||
Linger *unix.Linger
|
||||
RecvTimeout *unix.Timeval
|
||||
SendTimeout *unix.Timeval
|
||||
TCPInfo *unix.TCPInfo
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
|
||||
s.Linger = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
|
||||
s.RecvTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
|
||||
s.SendTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
|
||||
s.TCPInfo = v
|
||||
}
|
||||
}
|
44
vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
generated
vendored
Normal file
44
vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// +build !linux appengine !go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
// Socket options are not supported on non-linux OSes or appengine.
|
||||
type SocketOptionData struct {
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
// Socket options are not supported on non-linux OSes or appengine.
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
once.Do(func() {
|
||||
grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
})
|
||||
}
|
39
vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
generated
vendored
Normal file
39
vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// +build linux,go1.9,!appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(socket interface{}) *SocketOptionData {
|
||||
c, ok := socket.(syscall.Conn)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
data := &SocketOptionData{}
|
||||
if rawConn, err := c.SyscallConn(); err == nil {
|
||||
rawConn.Control(data.Getsockopt)
|
||||
return data
|
||||
}
|
||||
return nil
|
||||
}
|
26
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go
generated
vendored
Normal file
26
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// +build !linux !go1.9 appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(c interface{}) *SocketOptionData {
|
||||
return nil
|
||||
}
|
90
vendor/google.golang.org/grpc/internal/channelz/util_test.go
generated
vendored
Normal file
90
vendor/google.golang.org/grpc/internal/channelz/util_test.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// +build linux,go1.10,!appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// The test in this file should be run in an environment that has go1.10 or later,
|
||||
// as the function SyscallConn() (required to get socket option) was introduced
|
||||
// to net.TCPListener in go1.10.
|
||||
|
||||
package channelz_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
)
|
||||
|
||||
func TestGetSocketOpt(t *testing.T) {
|
||||
network, addr := "tcp", ":0"
|
||||
ln, err := net.Listen(network, addr)
|
||||
if err != nil {
|
||||
t.Fatalf("net.Listen(%s,%s) failed with err: %v", network, addr, err)
|
||||
}
|
||||
defer ln.Close()
|
||||
go func() {
|
||||
ln.Accept()
|
||||
}()
|
||||
conn, _ := net.Dial(network, ln.Addr().String())
|
||||
defer conn.Close()
|
||||
tcpc := conn.(*net.TCPConn)
|
||||
raw, err := tcpc.SyscallConn()
|
||||
if err != nil {
|
||||
t.Fatalf("SyscallConn() failed due to %v", err)
|
||||
}
|
||||
|
||||
l := &unix.Linger{Onoff: 1, Linger: 5}
|
||||
recvTimout := &unix.Timeval{Sec: 100}
|
||||
sendTimeout := &unix.Timeval{Sec: 8888}
|
||||
raw.Control(func(fd uintptr) {
|
||||
err := unix.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to SetsockoptLinger(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l, err)
|
||||
}
|
||||
err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout, err)
|
||||
}
|
||||
err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout, err)
|
||||
}
|
||||
})
|
||||
sktopt := channelz.GetSocketOption(conn)
|
||||
if !reflect.DeepEqual(sktopt.Linger, l) {
|
||||
t.Fatalf("get socket option linger, want: %v, got %v", l, sktopt.Linger)
|
||||
}
|
||||
if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimout) {
|
||||
t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimout, sktopt.RecvTimeout)
|
||||
}
|
||||
if !reflect.DeepEqual(sktopt.SendTimeout, sendTimeout) {
|
||||
t.Logf("get socket option send timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", sendTimeout, sktopt.SendTimeout)
|
||||
}
|
||||
if sktopt == nil || sktopt.TCPInfo != nil && sktopt.TCPInfo.State != 1 {
|
||||
t.Fatalf("TCPInfo.State want 1 (TCP_ESTABLISHED), got %v", sktopt)
|
||||
}
|
||||
|
||||
sktopt = channelz.GetSocketOption(ln)
|
||||
if sktopt == nil || sktopt.TCPInfo == nil || sktopt.TCPInfo.State != 10 {
|
||||
t.Fatalf("TCPInfo.State want 10 (TCP_LISTEN), got %v", sktopt)
|
||||
}
|
||||
}
|
35
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
35
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package envconfig contains grpc settings configured by environment variables.
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
)
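// Retry is read once at package init, so it reflects the value of
// GRPC_GO_RETRY at process start. A standalone sketch (assumed to run from
// inside the grpc module, since internal packages cannot be imported from
// outside it):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/envconfig"
)

func main() {
	// Start the process with GRPC_GO_RETRY=on to flip this to true.
	fmt.Println("retry enabled:", envconfig.Retry)
}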
|
61
vendor/google.golang.org/grpc/internal/grpcsync/event.go
generated
vendored
Normal file
61
vendor/google.golang.org/grpc/internal/grpcsync/event.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcsync implements additional synchronization primitives built upon
|
||||
// the sync package.
|
||||
package grpcsync
|
||||
|
||||
import "sync"
|
||||
|
||||
// Event represents a one-time event that may occur in the future.
|
||||
type Event struct {
|
||||
c chan struct{}
|
||||
o sync.Once
|
||||
}
|
||||
|
||||
// Fire causes e to complete. It is safe to call multiple times, and
|
||||
// concurrently. It returns true iff this call to Fire caused the signaling
|
||||
// channel returned by Done to close.
|
||||
func (e *Event) Fire() bool {
|
||||
ret := false
|
||||
e.o.Do(func() {
|
||||
close(e.c)
|
||||
ret = true
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
// Done returns a channel that will be closed when Fire is called.
|
||||
func (e *Event) Done() <-chan struct{} {
|
||||
return e.c
|
||||
}
|
||||
|
||||
// HasFired returns true if Fire has been called.
|
||||
func (e *Event) HasFired() bool {
|
||||
select {
|
||||
case <-e.c:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// NewEvent returns a new, ready-to-use Event.
|
||||
func NewEvent() *Event {
|
||||
return &Event{c: make(chan struct{})}
|
||||
}
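// A small usage sketch for Event (assumed to sit in this package or elsewhere
// inside the grpc module): Fire is idempotent, and Done gives a channel that
// can be selected on or blocked on.

func exampleEvent() {
	e := NewEvent()
	go func() {
		// ... do some work ...
		e.Fire() // only the first call returns true and closes Done()
	}()
	<-e.Done()
	_ = e.HasFired() // true from here on
}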
|
69
vendor/google.golang.org/grpc/internal/grpcsync/event_test.go
generated
vendored
Normal file
69
vendor/google.golang.org/grpc/internal/grpcsync/event_test.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestEventHasFired(t *testing.T) {
|
||||
e := NewEvent()
|
||||
if e.HasFired() {
|
||||
t.Fatal("e.HasFired() = true; want false")
|
||||
}
|
||||
if !e.Fire() {
|
||||
t.Fatal("e.Fire() = false; want true")
|
||||
}
|
||||
if !e.HasFired() {
|
||||
t.Fatal("e.HasFired() = false; want true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventDoneChannel(t *testing.T) {
|
||||
e := NewEvent()
|
||||
select {
|
||||
case <-e.Done():
|
||||
t.Fatal("e.HasFired() = true; want false")
|
||||
default:
|
||||
}
|
||||
if !e.Fire() {
|
||||
t.Fatal("e.Fire() = false; want true")
|
||||
}
|
||||
select {
|
||||
case <-e.Done():
|
||||
default:
|
||||
t.Fatal("e.HasFired() = false; want true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventMultipleFires(t *testing.T) {
|
||||
e := NewEvent()
|
||||
if e.HasFired() {
|
||||
t.Fatal("e.HasFired() = true; want false")
|
||||
}
|
||||
if !e.Fire() {
|
||||
t.Fatal("e.Fire() = false; want true")
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
if !e.HasFired() {
|
||||
t.Fatal("e.HasFired() = false; want true")
|
||||
}
|
||||
if e.Fire() {
|
||||
t.Fatal("e.Fire() = true; want false")
|
||||
}
|
||||
}
|
||||
}
|
23
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
23
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@ -20,17 +20,24 @@
|
||||
// symbols to avoid circular dependencies.
|
||||
package internal
|
||||
|
||||
import "golang.org/x/net/context"
|
||||
|
||||
var (
|
||||
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
TestingUseHandlerImpl func(grpcServer interface{})
|
||||
|
||||
// WithContextDialer is exported by clientconn.go
|
||||
WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
|
||||
// WithResolverBuilder is exported by clientconn.go
|
||||
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
|
||||
// HealthCheckFunc is used to provide client-side LB channel health checking
|
||||
HealthCheckFunc func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
|
||||
)
|
||||
|
||||
const (
|
||||
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
|
||||
CredsBundleModeFallback = "fallback"
|
||||
// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
|
||||
// mode.
|
||||
CredsBundleModeBalancer = "balancer"
|
||||
// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
|
||||
// that supports backend returned by grpclb balancer.
|
||||
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
|
||||
)
|
||||
|
67
vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
generated
vendored
Normal file
67
vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
// +build !appengine,go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package syscall provides functionality that grpc uses to get low-level operating system
|
||||
// stats/info.
|
||||
package syscall
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this process.
|
||||
func GetCPUTime() int64 {
|
||||
var ts unix.Timespec
|
||||
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
|
||||
grpclog.Fatal(err)
|
||||
}
|
||||
return ts.Nano()
|
||||
}
|
||||
|
||||
// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
|
||||
type Rusage syscall.Rusage
|
||||
|
||||
// GetRusage returns the resource usage of current process.
|
||||
func GetRusage() (rusage *Rusage) {
|
||||
rusage = new(Rusage)
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
|
||||
return
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
f := (*syscall.Rusage)(first)
|
||||
l := (*syscall.Rusage)(latest)
|
||||
var (
|
||||
utimeDiffs = l.Utime.Sec - f.Utime.Sec
|
||||
utimeDiffus = l.Utime.Usec - f.Utime.Usec
|
||||
stimeDiffs = l.Stime.Sec - f.Stime.Sec
|
||||
stimeDiffus = l.Stime.Usec - f.Stime.Usec
|
||||
)
|
||||
|
||||
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
|
||||
sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
|
||||
|
||||
return uTimeElapsed, sTimeElapsed
|
||||
}
|
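The two helpers above are meant to be used as a pair: snapshot Rusage before and after a stretch of work, then feed both snapshots to CPUTimeDiff. A minimal standalone sketch of the same pattern against the standard library (Linux-only; the busy loop and all names are illustrative, not part of the package):

// +build linux

package main

import (
	"fmt"
	"syscall"
)

// cpuTimeDiff mirrors CPUTimeDiff above: user and system CPU seconds
// consumed between two Rusage snapshots.
func cpuTimeDiff(first, latest *syscall.Rusage) (float64, float64) {
	u := float64(latest.Utime.Sec-first.Utime.Sec) + float64(latest.Utime.Usec-first.Utime.Usec)*1e-6
	s := float64(latest.Stime.Sec-first.Stime.Sec) + float64(latest.Stime.Usec-first.Stime.Usec)*1e-6
	return u, s
}

func main() {
	var before, after syscall.Rusage
	syscall.Getrusage(syscall.RUSAGE_SELF, &before)

	// Hypothetical workload: burn a little CPU so the diff is non-zero.
	x := 0
	for i := 0; i < 10000000; i++ {
		x += i
	}

	syscall.Getrusage(syscall.RUSAGE_SELF, &after)
	u, s := cpuTimeDiff(&before, &after)
	fmt.Printf("x=%d user=%.3fs system=%.3fs\n", x, u, s)
}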
47
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
Normal file
47
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
// +build !linux appengine !go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package syscall
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
func init() {
|
||||
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
|
||||
}
|
||||
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this process.
|
||||
// It always returns 0 under non-linux or appengine environment.
|
||||
func GetCPUTime() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Rusage is an empty struct under non-linux or appengine environment.
|
||||
type Rusage struct{}
|
||||
|
||||
// GetRusage is a no-op function under non-linux or appengine environment.
|
||||
func GetRusage() (rusage *Rusage) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs. It is a no-op for non-linux or appengine environments.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
return 0, 0
|
||||
}
|
95
vendor/google.golang.org/grpc/internal/testutils/pipe_listener.go
generated
vendored
Normal file
95
vendor/google.golang.org/grpc/internal/testutils/pipe_listener.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errClosed = errors.New("closed")
|
||||
|
||||
type pipeAddr struct{}
|
||||
|
||||
func (p pipeAddr) Network() string { return "pipe" }
|
||||
func (p pipeAddr) String() string { return "pipe" }
|
||||
|
||||
// PipeListener is a listener with an unbuffered pipe. Each write will complete only once the other side reads. It
|
||||
// should only be created using NewPipeListener.
|
||||
type PipeListener struct {
|
||||
c chan chan<- net.Conn
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// NewPipeListener creates a new pipe listener.
|
||||
func NewPipeListener() *PipeListener {
|
||||
return &PipeListener{
|
||||
c: make(chan chan<- net.Conn),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Accept accepts a connection.
|
||||
func (p *PipeListener) Accept() (net.Conn, error) {
|
||||
var connChan chan<- net.Conn
|
||||
select {
|
||||
case <-p.done:
|
||||
return nil, errClosed
|
||||
case connChan = <-p.c:
|
||||
select {
|
||||
case <-p.done:
|
||||
close(connChan)
|
||||
return nil, errClosed
|
||||
default:
|
||||
}
|
||||
}
|
||||
c1, c2 := net.Pipe()
|
||||
connChan <- c1
|
||||
close(connChan)
|
||||
return c2, nil
|
||||
}
|
||||
|
||||
// Close closes the listener.
|
||||
func (p *PipeListener) Close() error {
|
||||
close(p.done)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Addr returns a pipe addr.
|
||||
func (p *PipeListener) Addr() net.Addr {
|
||||
return pipeAddr{}
|
||||
}
|
||||
|
||||
// Dialer dials a connection.
|
||||
func (p *PipeListener) Dialer() func(string, time.Duration) (net.Conn, error) {
|
||||
return func(string, time.Duration) (net.Conn, error) {
|
||||
connChan := make(chan net.Conn)
|
||||
select {
|
||||
case p.c <- connChan:
|
||||
case <-p.done:
|
||||
return nil, errClosed
|
||||
}
|
||||
conn, ok := <-connChan
|
||||
if !ok {
|
||||
return nil, errClosed
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
}
|
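PipeListener exists so tests can run a gRPC client and server in the same process without touching the network: the server consumes the listener, the client dials through Dialer. A rough wiring sketch, assuming the grpc.WithDialer option of this grpc-go vintage; note that internal/testutils is only importable from inside the google.golang.org/grpc module, so this is illustrative only:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/internal/testutils"
)

func main() {
	pl := testutils.NewPipeListener()

	// Server side: serve gRPC on the pipe-backed listener.
	srv := grpc.NewServer()
	go srv.Serve(pl)
	defer srv.Stop()

	// Client side: dial through the in-memory dialer; the address is ignored.
	conn, err := grpc.Dial("pipe", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Register services on srv and create stubs on conn as usual.
}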
163
vendor/google.golang.org/grpc/internal/testutils/pipe_listener_test.go
generated
vendored
Normal file
163
vendor/google.golang.org/grpc/internal/testutils/pipe_listener_test.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package testutils_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/internal/testutils"
|
||||
)
|
||||
|
||||
func TestPipeListener(t *testing.T) {
|
||||
pl := testutils.NewPipeListener()
|
||||
recvdBytes := make(chan []byte)
|
||||
const want = "hello world"
|
||||
|
||||
go func() {
|
||||
c, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
read := make([]byte, len(want))
|
||||
_, err = c.Read(read)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
recvdBytes <- read
|
||||
}()
|
||||
|
||||
dl := pl.Dialer()
|
||||
conn, err := dl("", time.Duration(0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = conn.Write([]byte(want))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case gotBytes := <-recvdBytes:
|
||||
got := string(gotBytes)
|
||||
if got != want {
|
||||
t.Fatalf("expected to get %s, got %s", got, want)
|
||||
}
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("timed out waiting for server to receive bytes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnblocking(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
blockFuncShouldError bool
|
||||
blockFunc func(*testutils.PipeListener, chan struct{}) error
|
||||
unblockFunc func(*testutils.PipeListener) error
|
||||
}{
|
||||
{
|
||||
desc: "Accept unblocks Dial",
|
||||
blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error {
|
||||
dl := pl.Dialer()
|
||||
_, err := dl("", time.Duration(0))
|
||||
close(done)
|
||||
return err
|
||||
},
|
||||
unblockFunc: func(pl *testutils.PipeListener) error {
|
||||
_, err := pl.Accept()
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Close unblocks Dial",
|
||||
blockFuncShouldError: true, // because pl.Close will be called
|
||||
blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error {
|
||||
dl := pl.Dialer()
|
||||
_, err := dl("", time.Duration(0))
|
||||
close(done)
|
||||
return err
|
||||
},
|
||||
unblockFunc: func(pl *testutils.PipeListener) error {
|
||||
return pl.Close()
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Dial unblocks Accept",
|
||||
blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error {
|
||||
_, err := pl.Accept()
|
||||
close(done)
|
||||
return err
|
||||
},
|
||||
unblockFunc: func(pl *testutils.PipeListener) error {
|
||||
dl := pl.Dialer()
|
||||
_, err := dl("", time.Duration(0))
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Close unblocks Accept",
|
||||
blockFuncShouldError: true, // because pl.Close will be called
|
||||
blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error {
|
||||
_, err := pl.Accept()
|
||||
close(done)
|
||||
return err
|
||||
},
|
||||
unblockFunc: func(pl *testutils.PipeListener) error {
|
||||
return pl.Close()
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Log(test.desc)
|
||||
testUnblocking(t, test.blockFunc, test.unblockFunc, test.blockFuncShouldError)
|
||||
}
|
||||
}
|
||||
|
||||
func testUnblocking(t *testing.T, blockFunc func(*testutils.PipeListener, chan struct{}) error, unblockFunc func(*testutils.PipeListener) error, blockFuncShouldError bool) {
|
||||
pl := testutils.NewPipeListener()
|
||||
dialFinished := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
err := blockFunc(pl, dialFinished)
|
||||
if blockFuncShouldError && err == nil {
|
||||
t.Error("expected blocking func to return error because pl.Close was called, but got nil")
|
||||
}
|
||||
|
||||
if !blockFuncShouldError && err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-dialFinished:
|
||||
t.Fatal("expected Dial to block until pl.Close or pl.Accept")
|
||||
default:
|
||||
}
|
||||
|
||||
if err := unblockFunc(pl); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-dialFinished:
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("expected Accept to unblock after pl.Accept was called")
|
||||
}
|
||||
}
|
140
vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
generated
vendored
Normal file
140
vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// bdpLimit is the maximum value the flow control windows
|
||||
// will be increased to.
|
||||
bdpLimit = (1 << 20) * 4
|
||||
// alpha is a constant factor used to keep a moving average
|
||||
// of RTTs.
|
||||
alpha = 0.9
|
||||
// If the current bdp sample is greater than or equal to
|
||||
// our beta * our estimated bdp and the current bandwidth
|
||||
// sample is the maximum bandwidth observed so far, we
|
||||
// increase our bdp estimate by a factor of gamma.
|
||||
beta = 0.66
|
||||
// To keep our bdp smaller than or equal to twice the real BDP,
|
||||
// we should multiply our current sample by 4/3; however, to round things out
|
||||
// we use 2 as the multiplication factor.
|
||||
gamma = 2
|
||||
)
|
||||
|
||||
// Adding arbitrary data to ping so that its ack can be identified.
|
||||
// Easter-egg: what does the ping message say?
|
||||
var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
|
||||
type bdpEstimator struct {
|
||||
// sentAt is the time when the ping was sent.
|
||||
sentAt time.Time
|
||||
|
||||
mu sync.Mutex
|
||||
// bdp is the current bdp estimate.
|
||||
bdp uint32
|
||||
// sample is the number of bytes received in one measurement cycle.
|
||||
sample uint32
|
||||
// bwMax is the maximum bandwidth noted so far (bytes/sec).
|
||||
bwMax float64
|
||||
// bool to keep track of the beginning of a new measurement cycle.
|
||||
isSent bool
|
||||
// Callback to update the window sizes.
|
||||
updateFlowControl func(n uint32)
|
||||
// sampleCount is the number of samples taken so far.
|
||||
sampleCount uint64
|
||||
// round trip time (seconds)
|
||||
rtt float64
|
||||
}
|
||||
|
||||
// timesnap registers the time bdp ping was sent out so that
|
||||
// network rtt can be calculated when its ack is received.
|
||||
// It is called (by controller) when the bdpPing is
|
||||
// being written on the wire.
|
||||
func (b *bdpEstimator) timesnap(d [8]byte) {
|
||||
if bdpPing.data != d {
|
||||
return
|
||||
}
|
||||
b.sentAt = time.Now()
|
||||
}
|
||||
|
||||
// add adds bytes to the current sample for calculating bdp.
|
||||
// It returns true only if a ping must be sent. This can be used
|
||||
// by the caller (handleData) to make a decision about batching
|
||||
// a window update with it.
|
||||
func (b *bdpEstimator) add(n uint32) bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.bdp == bdpLimit {
|
||||
return false
|
||||
}
|
||||
if !b.isSent {
|
||||
b.isSent = true
|
||||
b.sample = n
|
||||
b.sentAt = time.Time{}
|
||||
b.sampleCount++
|
||||
return true
|
||||
}
|
||||
b.sample += n
|
||||
return false
|
||||
}
|
||||
|
||||
// calculate is called when an ack for a bdp ping is received.
|
||||
// Here we calculate the current bdp and bandwidth sample and
|
||||
// decide if the flow control windows should go up.
|
||||
func (b *bdpEstimator) calculate(d [8]byte) {
|
||||
// Check if the ping acked for was the bdp ping.
|
||||
if bdpPing.data != d {
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
rttSample := time.Since(b.sentAt).Seconds()
|
||||
if b.sampleCount < 10 {
|
||||
// Bootstrap rtt with an average of first 10 rtt samples.
|
||||
b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
|
||||
} else {
|
||||
// Give more weight to the recent past.
|
||||
b.rtt += (rttSample - b.rtt) * float64(alpha)
|
||||
}
|
||||
b.isSent = false
|
||||
// The number of bytes accumulated so far in the sample is smaller
|
||||
// than or equal to 1.5 times the real BDP on a saturated connection.
|
||||
bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
|
||||
if bwCurrent > b.bwMax {
|
||||
b.bwMax = bwCurrent
|
||||
}
|
||||
// If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
|
||||
// greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
|
||||
// should update our perception of the network BDP.
|
||||
if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
|
||||
sampleFloat := float64(b.sample)
|
||||
b.bdp = uint32(gamma * sampleFloat)
|
||||
if b.bdp > bdpLimit {
|
||||
b.bdp = bdpLimit
|
||||
}
|
||||
bdp := b.bdp
|
||||
b.mu.Unlock()
|
||||
b.updateFlowControl(bdp)
|
||||
return
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
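Stripped of locking and plumbing, calculate reduces to a small update rule: compute a bandwidth sample as sample/(rtt*1.5), and if the byte sample is at least beta of the current estimate while the bandwidth sample is a new maximum, grow the estimate to gamma*sample, capped at bdpLimit. A hedged restatement of just that arithmetic (function and variable names are illustrative, not part of the package):

package main

import "fmt"

const (
	bdpLimit = (1 << 20) * 4 // 4 MiB cap on the window estimate
	beta     = 0.66
	gamma    = 2
)

// nextBDP applies the same update rule as bdpEstimator.calculate: grow the
// estimate to gamma*sample when the sample is large relative to the current
// estimate and the bandwidth sample is at least the previous maximum.
func nextBDP(bdp, sample uint32, rttSec, bwMax float64) uint32 {
	bwCurrent := float64(sample) / (rttSec * 1.5)
	if float64(sample) >= beta*float64(bdp) && bwCurrent >= bwMax && bdp != bdpLimit {
		next := uint32(gamma * float64(sample))
		if next > bdpLimit {
			next = bdpLimit
		}
		return next
	}
	return bdp
}

func main() {
	// 64 KiB sampled over a 20 ms RTT with no prior bandwidth maximum.
	fmt.Println(nextBDP(65535, 64*1024, 0.020, 0)) // 131072
}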
852
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
Normal file
852
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
Normal file
@ -0,0 +1,852 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
e.SetMaxDynamicTableSizeLimit(v)
|
||||
}
|
||||
|
||||
type itemNode struct {
|
||||
it interface{}
|
||||
next *itemNode
|
||||
}
|
||||
|
||||
type itemList struct {
|
||||
head *itemNode
|
||||
tail *itemNode
|
||||
}
|
||||
|
||||
func (il *itemList) enqueue(i interface{}) {
|
||||
n := &itemNode{it: i}
|
||||
if il.tail == nil {
|
||||
il.head, il.tail = n, n
|
||||
return
|
||||
}
|
||||
il.tail.next = n
|
||||
il.tail = n
|
||||
}
|
||||
|
||||
// peek returns the first item in the list without removing it from the
|
||||
// list.
|
||||
func (il *itemList) peek() interface{} {
|
||||
return il.head.it
|
||||
}
|
||||
|
||||
func (il *itemList) dequeue() interface{} {
|
||||
if il.head == nil {
|
||||
return nil
|
||||
}
|
||||
i := il.head.it
|
||||
il.head = il.head.next
|
||||
if il.head == nil {
|
||||
il.tail = nil
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (il *itemList) dequeueAll() *itemNode {
|
||||
h := il.head
|
||||
il.head, il.tail = nil, nil
|
||||
return h
|
||||
}
|
||||
|
||||
func (il *itemList) isEmpty() bool {
|
||||
return il.head == nil
|
||||
}
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, stream resetting, etc.
|
||||
|
||||
// registerStream is used to register an incoming stream with loopy writer.
|
||||
type registerStream struct {
|
||||
streamID uint32
|
||||
wq *writeQuota
|
||||
}
|
||||
|
||||
// headerFrame is also used to register a stream on the client-side.
|
||||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) (bool, error) // Used only on the client side.
|
||||
onWrite func()
|
||||
wq *writeQuota // write quota for the stream created.
|
||||
cleanup *cleanupStream // Valid on the server side.
|
||||
onOrphaned func(error) // Valid on client-side
|
||||
}
|
||||
|
||||
type cleanupStream struct {
|
||||
streamID uint32
|
||||
rst bool
|
||||
rstCode http2.ErrCode
|
||||
onWrite func()
|
||||
}
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
h []byte
|
||||
d []byte
|
||||
// onEachWrite is called every time
|
||||
// a part of d is written out.
|
||||
onEachWrite func()
|
||||
}
|
||||
|
||||
type incomingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type outgoingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type incomingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type outgoingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type incomingGoAway struct {
|
||||
}
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
type outFlowControlSizeRequest struct {
|
||||
resp chan uint32
|
||||
}
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
active outStreamState = iota
|
||||
empty
|
||||
waitingOnStreamQuota
|
||||
)
|
||||
|
||||
type outStream struct {
|
||||
id uint32
|
||||
state outStreamState
|
||||
itl *itemList
|
||||
bytesOutStanding int
|
||||
wq *writeQuota
|
||||
|
||||
next *outStream
|
||||
prev *outStream
|
||||
}
|
||||
|
||||
func (s *outStream) deleteSelf() {
|
||||
if s.prev != nil {
|
||||
s.prev.next = s.next
|
||||
}
|
||||
if s.next != nil {
|
||||
s.next.prev = s.prev
|
||||
}
|
||||
s.next, s.prev = nil, nil
|
||||
}
|
||||
|
||||
type outStreamList struct {
|
||||
// Following are sentinel objects that mark the
|
||||
// beginning and end of the list. They do not
|
||||
// contain any item lists. All valid objects are
|
||||
// inserted in between them.
|
||||
// This is needed so that an outStream object can
|
||||
// deleteSelf() in O(1) time without knowing which
|
||||
// list it belongs to.
|
||||
head *outStream
|
||||
tail *outStream
|
||||
}
|
||||
|
||||
func newOutStreamList() *outStreamList {
|
||||
head, tail := new(outStream), new(outStream)
|
||||
head.next = tail
|
||||
tail.prev = head
|
||||
return &outStreamList{
|
||||
head: head,
|
||||
tail: tail,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *outStreamList) enqueue(s *outStream) {
|
||||
e := l.tail.prev
|
||||
e.next = s
|
||||
s.prev = e
|
||||
s.next = l.tail
|
||||
l.tail.prev = s
|
||||
}
|
||||
|
||||
// remove from the beginning of the list.
|
||||
func (l *outStreamList) dequeue() *outStream {
|
||||
b := l.head.next
|
||||
if b == l.tail {
|
||||
return nil
|
||||
}
|
||||
b.deleteSelf()
|
||||
return b
|
||||
}
|
||||
|
||||
// controlBuffer is a way to pass information to loopy.
|
||||
// Information is passed as specific struct types called control frames.
|
||||
// A control frame not only represents data, messages or headers to be sent out
|
||||
// but can also be used to instruct loopy to update its internal state.
|
||||
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
|
||||
// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
|
||||
type controlBuffer struct {
|
||||
ch chan struct{}
|
||||
done <-chan struct{}
|
||||
mu sync.Mutex
|
||||
consumerWaiting bool
|
||||
list *itemList
|
||||
err error
|
||||
}
|
||||
|
||||
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
return &controlBuffer{
|
||||
ch: make(chan struct{}, 1),
|
||||
list: &itemList{},
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) put(it interface{}) error {
|
||||
_, err := c.executeAndPut(nil, it)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if f != nil {
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if c.consumerWaiting {
|
||||
wakeUp = true
|
||||
c.consumerWaiting = false
|
||||
}
|
||||
c.list.enqueue(it)
|
||||
c.mu.Unlock()
|
||||
if wakeUp {
|
||||
select {
|
||||
case c.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Note argument f should never be nil.
|
||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return nil, c.err
|
||||
}
|
||||
if !c.list.isEmpty() {
|
||||
h := c.list.dequeue()
|
||||
c.mu.Unlock()
|
||||
return h, nil
|
||||
}
|
||||
if !block {
|
||||
c.mu.Unlock()
|
||||
return nil, nil
|
||||
}
|
||||
c.consumerWaiting = true
|
||||
c.mu.Unlock()
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
c.finish()
|
||||
return nil, ErrConnClosing
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) finish() {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
c.err = ErrConnClosing
|
||||
// There may be headers for streams in the control buffer.
|
||||
// These streams need to be cleaned out since the transport
|
||||
// is still not aware of these yet.
|
||||
for head := c.list.dequeueAll(); head != nil; head = head.next {
|
||||
hdr, ok := head.it.(*headerFrame)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if hdr.onOrphaned != nil { // It will be nil on the server-side.
|
||||
hdr.onOrphaned(ErrConnClosing)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type side int
|
||||
|
||||
const (
|
||||
clientSide side = iota
|
||||
serverSide
|
||||
)
|
||||
|
||||
// Loopy receives frames from the control buffer.
|
||||
// Each frame is handled individually; most of the work done by loopy goes
|
||||
// into handling data frames. Loopy maintains a queue of active streams, and each
|
||||
// stream maintains a queue of data frames; as loopy receives data frames
|
||||
// they get added to the queue of the relevant stream.
|
||||
// Loopy goes over this list of active streams by processing one node every iteration,
|
||||
// thereby closely resemebling to a round-robin scheduling over all streams. While
|
||||
// processing a stream, loopy writes out data bytes from this stream capped by the min
|
||||
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
|
||||
type loopyWriter struct {
|
||||
side side
|
||||
cbuf *controlBuffer
|
||||
sendQuota uint32
|
||||
oiws uint32 // outbound initial window size.
|
||||
// estdStreams is map of all established streams that are not cleaned-up yet.
|
||||
// On client-side, this is all streams whose headers were sent out.
|
||||
// On server-side, this is all streams whose headers were received.
|
||||
estdStreams map[uint32]*outStream // Established streams.
|
||||
// activeStreams is a linked-list of all streams that have data to send and some
|
||||
// stream-level flow control quota.
|
||||
// Each of these streams internally has a list of data items (and perhaps trailers
|
||||
// on the server-side) to be sent out.
|
||||
activeStreams *outStreamList
|
||||
framer *framer
|
||||
hBuf *bytes.Buffer // The buffer for HPACK encoding.
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
const minBatchSize = 1000
|
||||
|
||||
// run should be run in a separate goroutine.
|
||||
// It reads control frames from controlBuf and processes them by:
|
||||
// 1. Updating loopy's internal state, or/and
|
||||
// 2. Writing out HTTP2 frames on the wire.
|
||||
//
|
||||
// Loopy keeps all active streams with data to send in a linked-list.
|
||||
// All streams in the activeStreams linked-list must have both:
|
||||
// 1. Data to send, and
|
||||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
||||
// if the batch size is too low to give stream goroutines a chance to fill it up.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if err == ErrConnClosing {
|
||||
// Don't log ErrConnClosing as error since it happens
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
infof("transport: loopyWriter.run returning. %v", err)
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
gosched := true
|
||||
hasdata:
|
||||
for {
|
||||
it, err := l.cbuf.get(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if it != nil {
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue hasdata
|
||||
}
|
||||
isEmpty, err := l.processData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isEmpty {
|
||||
continue hasdata
|
||||
}
|
||||
if gosched {
|
||||
gosched = false
|
||||
if l.framer.writer.offset < minBatchSize {
|
||||
runtime.Gosched()
|
||||
continue hasdata
|
||||
}
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
// A window update for stream 0 updates the connection-level send quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
str.bytesOutStanding -= int(w.increment)
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
return l.framer.fr.WriteSettings(s.ss...)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
return nil
|
||||
}
|
||||
// Case 1.A: Server is responding back with headers.
|
||||
if !h.endStream {
|
||||
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
|
||||
}
|
||||
// else: Case 1.B: Server wants to close stream.
|
||||
|
||||
if str.state != empty { // either active or waiting on stream quota.
|
||||
// add it to str's list of items.
|
||||
str.itl.enqueue(h)
|
||||
return nil
|
||||
}
|
||||
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.cleanupStreamHandler(h.cleanup)
|
||||
}
|
||||
// Case 2: Client wants to originate stream.
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
str.itl.enqueue(h)
|
||||
return l.originateStream(str)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
sendPing, err := hdr.initStream(str.id)
|
||||
if err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors(errStreamDrain) need not close transport.
|
||||
return nil
|
||||
}
|
||||
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.estdStreams[str.id] = str
|
||||
if sendPing {
|
||||
return l.pingHandler(&ping{data: [8]byte{}})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
|
||||
if onWrite != nil {
|
||||
onWrite()
|
||||
}
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
|
||||
}
|
||||
}
|
||||
var (
|
||||
err error
|
||||
endHeaders, first bool
|
||||
)
|
||||
first = true
|
||||
for !endHeaders {
|
||||
size := l.hBuf.Len()
|
||||
if size > http2MaxFrameLen {
|
||||
size = http2MaxFrameLen
|
||||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
|
||||
StreamID: streamID,
|
||||
BlockFragment: l.hBuf.Next(size),
|
||||
EndStream: endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = l.framer.fr.WriteContinuation(
|
||||
streamID,
|
||||
endHeaders,
|
||||
l.hBuf.Next(size),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
str.itl.enqueue(df)
|
||||
if str.state == empty {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
if !p.ack {
|
||||
l.bdpEst.timesnap(p.data)
|
||||
}
|
||||
return l.framer.fr.WritePing(p.ack, p.data)
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
c.onWrite()
|
||||
if str, ok := l.estdStreams[c.streamID]; ok {
|
||||
// On the server side it could be a trailers-only response or
|
||||
// a RST_STREAM before stream initialization thus the stream might
|
||||
// not be established yet.
|
||||
delete(l.estdStreams, c.streamID)
|
||||
str.deleteSelf()
|
||||
}
|
||||
if c.rst { // If RST_STREAM needs to be sent.
|
||||
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
// Handling of outgoing GoAway is very specific to side.
|
||||
if l.ssGoAwayHandler != nil {
|
||||
draining, err := l.ssGoAwayHandler(g)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.draining = draining
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
return l.incomingSettingsHandler(i)
|
||||
case *outgoingSettings:
|
||||
return l.outgoingSettingsHandler(i)
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
o := l.oiws
|
||||
l.oiws = s.Val
|
||||
if o < l.oiws {
|
||||
// If the new limit is greater make all depleted streams active.
|
||||
for _, stream := range l.estdStreams {
|
||||
if stream.state == waitingOnStreamQuota {
|
||||
stream.state = active
|
||||
l.activeStreams.enqueue(stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
case http2.SettingHeaderTableSize:
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
// of its data and then puts it at the end of activeStreams if there's still more data
|
||||
// to be sent and the stream still has some stream-level flow control quota.
|
||||
func (l *loopyWriter) processData() (bool, error) {
|
||||
if l.sendQuota == 0 {
|
||||
return true, nil
|
||||
}
|
||||
str := l.activeStreams.dequeue() // Remove the first stream.
|
||||
if str == nil {
|
||||
return true, nil
|
||||
}
|
||||
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
|
||||
// A data item is represented by a dataFrame, since it later translates into
|
||||
// multiple HTTP2 data frames.
|
||||
// Every dataFrame has two buffers: h, which holds the grpc-message header, and d, which holds the actual data.
|
||||
// As an optimization to keep wire traffic low, data from d is copied into h to make a frame as big as the
|
||||
// maximum possible HTTP2 frame size.
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
||||
// Client sends out empty data frame with endStream = true
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str.itl.dequeue() // remove the empty data item from stream
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
var (
|
||||
idx int
|
||||
buf []byte
|
||||
)
|
||||
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
idx = 1
|
||||
buf = dataItem.d
|
||||
}
|
||||
size := http2MaxFrameLen
|
||||
if len(buf) < size {
|
||||
size = len(buf)
|
||||
}
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
||||
str.state = waitingOnStreamQuota
|
||||
return false, nil
|
||||
} else if strQuota < size {
|
||||
size = strQuota
|
||||
}
|
||||
|
||||
if l.sendQuota < uint32(size) { // connection-level flow control.
|
||||
size = int(l.sendQuota)
|
||||
}
|
||||
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||
str.wq.replenish(size)
|
||||
var endStream bool
|
||||
// If this is the last data message on this stream and all of it can be written in this iteration.
|
||||
if dataItem.endStream && size == len(buf) {
|
||||
// buf contains either data or it contains header but data is empty.
|
||||
if idx == 1 || len(dataItem.d) == 0 {
|
||||
endStream = true
|
||||
}
|
||||
}
|
||||
if dataItem.onEachWrite != nil {
|
||||
dataItem.onEachWrite()
|
||||
}
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||
return false, err
|
||||
}
|
||||
buf = buf[size:]
|
||||
str.bytesOutStanding += size
|
||||
l.sendQuota -= uint32(size)
|
||||
if idx == 0 {
|
||||
dataItem.h = buf
|
||||
} else {
|
||||
dataItem.d = buf
|
||||
}
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||
str.itl.dequeue()
|
||||
}
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
|
||||
str.state = waitingOnStreamQuota
|
||||
} else { // Otherwise add it back to the list of active streams.
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
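The heart of controlBuffer is a producer/consumer handshake: producers enqueue under a mutex and, if a consumer announced it was waiting, poke a one-slot channel; the consumer re-checks the list after every wake-up. A self-contained sketch of that pattern with hypothetical names (it mirrors put/get above but is not the package's API):

package main

import (
	"fmt"
	"sync"
)

// notifyQueue mirrors controlBuffer's core: enqueue under a mutex and wake a
// blocked consumer via a buffered channel of capacity one.
type notifyQueue struct {
	mu      sync.Mutex
	items   []interface{}
	waiting bool
	ch      chan struct{}
}

func newNotifyQueue() *notifyQueue { return &notifyQueue{ch: make(chan struct{}, 1)} }

func (q *notifyQueue) put(it interface{}) {
	q.mu.Lock()
	wakeUp := q.waiting
	q.waiting = false
	q.items = append(q.items, it)
	q.mu.Unlock()
	if wakeUp {
		select {
		case q.ch <- struct{}{}: // non-blocking wake-up
		default:
		}
	}
}

func (q *notifyQueue) get() interface{} {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			it := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return it
		}
		q.waiting = true
		q.mu.Unlock()
		<-q.ch // block until a producer wakes us
	}
}

func main() {
	q := newNotifyQueue()
	go q.put("hello")
	fmt.Println(q.get())
}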
49
vendor/google.golang.org/grpc/internal/transport/defaults.go
generated
vendored
Normal file
49
vendor/google.golang.org/grpc/internal/transport/defaults.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default value of flow control window size in HTTP2 spec.
|
||||
defaultWindowSize = 65535
|
||||
// The initial window size for flow control.
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
infinity = time.Duration(math.MaxInt64)
|
||||
defaultClientKeepaliveTime = infinity
|
||||
defaultClientKeepaliveTimeout = 20 * time.Second
|
||||
defaultMaxStreamsClient = 100
|
||||
defaultMaxConnectionIdle = infinity
|
||||
defaultMaxConnectionAge = infinity
|
||||
defaultMaxConnectionAgeGrace = infinity
|
||||
defaultServerKeepaliveTime = 2 * time.Hour
|
||||
defaultServerKeepaliveTimeout = 20 * time.Second
|
||||
defaultKeepalivePolicyMinTime = 5 * time.Minute
|
||||
// max window limit set by HTTP2 Specs.
|
||||
maxWindowSize = math.MaxInt32
|
||||
// defaultWriteQuota is the default value for number of data
|
||||
// bytes that each stream can schedule before some of it being
|
||||
// flushed out.
|
||||
defaultWriteQuota = 64 * 1024
|
||||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||||
)
|
218
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
generated
vendored
Normal file
218
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// writeQuota is a soft limit on the amount of data a stream can
|
||||
// schedule before some of it is written out.
|
||||
type writeQuota struct {
|
||||
quota int32
|
||||
// get waits on a read from ch when quota goes less than or equal to zero.
|
||||
// replenish writes on it when quota goes positive again.
|
||||
ch chan struct{}
|
||||
// done is triggered in error case.
|
||||
done <-chan struct{}
|
||||
// replenish is called by loopyWriter to give quota back.
|
||||
// It is implemented as a field so that it can be updated
|
||||
// by tests.
|
||||
replenish func(n int)
|
||||
}
|
||||
|
||||
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
|
||||
w := &writeQuota{
|
||||
quota: sz,
|
||||
ch: make(chan struct{}, 1),
|
||||
done: done,
|
||||
}
|
||||
w.replenish = w.realReplenish
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *writeQuota) get(sz int32) error {
|
||||
for {
|
||||
if atomic.LoadInt32(&w.quota) > 0 {
|
||||
atomic.AddInt32(&w.quota, -sz)
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-w.ch:
|
||||
continue
|
||||
case <-w.done:
|
||||
return errStreamDone
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *writeQuota) realReplenish(n int) {
|
||||
sz := int32(n)
|
||||
a := atomic.AddInt32(&w.quota, sz)
|
||||
b := a - sz
|
||||
if b <= 0 && a > 0 {
|
||||
select {
|
||||
case w.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type trInFlow struct {
|
||||
limit uint32
|
||||
unacked uint32
|
||||
effectiveWindowSize uint32
|
||||
}
|
||||
|
||||
func (f *trInFlow) newLimit(n uint32) uint32 {
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.updateEffectiveWindowSize()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *trInFlow) onData(n uint32) uint32 {
|
||||
f.unacked += n
|
||||
if f.unacked >= f.limit/4 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
f.updateEffectiveWindowSize()
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *trInFlow) reset() uint32 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
|
||||
func (f *trInFlow) updateEffectiveWindowSize() {
|
||||
atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
|
||||
}
|
||||
|
||||
func (f *trInFlow) getSize() uint32 {
|
||||
return atomic.LoadUint32(&f.effectiveWindowSize)
|
||||
}
|
||||
|
||||
// TODO(mmukhi): Simplify this code.
|
||||
// inFlow deals with inbound flow control
|
||||
type inFlow struct {
|
||||
mu sync.Mutex
|
||||
// The inbound flow control limit for pending data.
|
||||
limit uint32
|
||||
// pendingData is the overall data which has been received but not yet
|
||||
// consumed by applications.
|
||||
pendingData uint32
|
||||
// The amount of data the application has consumed but grpc has not sent
|
||||
// window update for them. Used to reduce window update frequency.
|
||||
pendingUpdate uint32
|
||||
// delta is the extra window update given by receiver when an application
|
||||
// is reading data bigger in size than the inFlow limit.
|
||||
delta uint32
|
||||
}
|
||||
|
||||
// newLimit updates the inflow window to a new value n.
|
||||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
if n > uint32(math.MaxInt32) {
|
||||
n = uint32(math.MaxInt32)
|
||||
}
|
||||
f.mu.Lock()
|
||||
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
||||
// can send without a window update.
|
||||
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
||||
// estUntransmittedData is the maximum number of bytes the sender might not have put
|
||||
// on the wire yet. A value of 0 or less means that we have already received all or
|
||||
// more bytes than the application is requesting to read.
|
||||
estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
|
||||
// This implies that unless we send a window update, the sender won't be able to send all the bytes
|
||||
// for this message. Therefore we must send an update over the limit since there's an active read
|
||||
// request from the application.
|
||||
if estUntransmittedData > estSenderQuota {
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
|
||||
if f.limit+n > maxWindowSize {
|
||||
f.delta = maxWindowSize - f.limit
|
||||
} else {
|
||||
// Send a window update for the whole message and not just the difference between
|
||||
// estUntransmittedData and estSenderQuota. This will be helpful in case the message
|
||||
// is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
|
||||
f.delta = n
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return f.delta
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
f.mu.Lock()
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
||||
limit := f.limit
|
||||
rcvd := f.pendingData + f.pendingUpdate
|
||||
f.mu.Unlock()
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// onRead is invoked when the application reads the data. It returns the window size
|
||||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
if f.pendingData == 0 {
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
if n > f.delta {
|
||||
n -= f.delta
|
||||
f.delta = 0
|
||||
} else {
|
||||
f.delta -= n
|
||||
n = 0
|
||||
}
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
f.mu.Unlock()
|
||||
return wu
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
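The inbound flow-control bookkeeping follows one simple rule: reads accumulate in pendingUpdate and a WINDOW_UPDATE is only emitted once at least a quarter of the window has been consumed, which keeps update frames off the wire for small reads. A minimal sketch of just that thresholding (types and names are illustrative, not the package's API):

package main

import "fmt"

// pendingWindow mirrors inFlow's batching of window updates: reads accumulate
// in pendingUpdate and an update is emitted only once the accumulated amount
// reaches limit/4.
type pendingWindow struct {
	limit         uint32
	pendingUpdate uint32
}

// onRead returns the increment to send to the peer, or 0 if the update
// should still be batched.
func (w *pendingWindow) onRead(n uint32) uint32 {
	w.pendingUpdate += n
	if w.pendingUpdate >= w.limit/4 {
		wu := w.pendingUpdate
		w.pendingUpdate = 0
		return wu
	}
	return 0
}

func main() {
	w := &pendingWindow{limit: 65535}
	fmt.Println(w.onRead(8 * 1024))  // 0: still batching
	fmt.Println(w.onRead(10 * 1024)) // 18432: threshold crossed, send update
}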
52
vendor/google.golang.org/grpc/internal/transport/go16.go
generated
vendored
Normal file
52
vendor/google.golang.org/grpc/internal/transport/go16.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a status error.
|
||||
func ContextErr(err error) error {
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a background context.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return context.Background()
|
||||
}
|
53
vendor/google.golang.org/grpc/internal/transport/go17.go
generated
vendored
Normal file
53
vendor/google.golang.org/grpc/internal/transport/go17.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a status error.
|
||||
func ContextErr(err error) error {
|
||||
switch err {
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a context from the HTTP Request.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return r.Context()
|
||||
}
|
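ContextErr's mapping is observable through the public status package; a small sketch that mirrors the switch above (toStatus is an illustrative stand-in, not the exported function):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatus mirrors the mapping done by transport.ContextErr for the two
// context errors callers actually see.
func toStatus(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Errorf(codes.Internal, "unexpected error from context package: %v", err)
}

func main() {
	fmt.Println(status.Code(toStatus(context.DeadlineExceeded))) // DeadlineExceeded
	fmt.Println(status.Code(toStatus(context.Canceled)))         // Canceled
}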
449
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
Normal file
449
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
Normal file
@ -0,0 +1,449 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||
// uses the standard Go http2 Server implementation (via the
|
||||
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := contentSubtype(contentType)
|
||||
if !validContentType {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
}
|
||||
if _, ok := w.(http.CloseNotifier); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
contentType: contentType,
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
metakv := []string{"content-type", contentType}
|
||||
if r.Host != "" {
|
||||
metakv = append(metakv, ":authority", r.Host)
|
||||
}
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) && !isWhitelistedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v, err := decodeMetadataHeader(k, v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
}
|
||||
st.headerMD = metadata.Pairs(metakv...)
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// serverHandlerTransport is an implementation of ServerTransport
|
||||
// which replies to exactly one gRPC request (exactly one HTTP request),
|
||||
// using the net/http.Handler interface. This http.Handler is guaranteed
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
// writes is a channel of code to run serialized in the
|
||||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
|
||||
// block concurrent WriteStatus calls
|
||||
// e.g. grpc/(*serverStream).SendMsg/RecvMsg
|
||||
writeStatusMu sync.Mutex
|
||||
|
||||
// we just mirror the request content-type
|
||||
contentType string
|
||||
// we store both contentType and contentSubtype so we don't keep recreating them
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
type strAddr string
|
||||
|
||||
func (a strAddr) Network() string {
|
||||
if a != "" {
|
||||
// Per the documentation on net/http.Request.RemoteAddr, if this is
|
||||
// set, it's set to the IP:port of the peer (hence, TCP):
|
||||
// https://golang.org/pkg/net/http/#Request
|
||||
//
|
||||
// If we want to support Unix sockets later, we can
|
||||
// add our own grpc-specific convention within the
|
||||
// grpc codebase to set RemoteAddr to a different
|
||||
// format, or probably better: we can attach it to the
|
||||
// context and use that from serverHandlerTransport.RemoteAddr.
|
||||
return "tcp"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a strAddr) String() string { return string(a) }
|
||||
|
||||
// do runs fn in the ServeHTTP goroutine.
|
||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
|
||||
select {
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
select {
|
||||
case ht.writes <- fn:
|
||||
return nil
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
}
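// exampleDoFlush is a hypothetical sketch (not part of upstream gRPC) of the
// pattern used by Write, WriteHeader and WriteStatus below: any side effect on
// the http.ResponseWriter is wrapped in a closure and handed to do, so that it
// runs on the ServeHTTP goroutine, or fails with ErrConnClosing once the
// transport has been closed.
func (ht *serverHandlerTransport) exampleDoFlush() error {
	return ht.do(func() {
		ht.rw.(http.Flusher).Flush()
	})
}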
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
|
||||
ht.writeStatusMu.Lock()
|
||||
defer ht.writeStatusMu.Unlock()
|
||||
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
// first call (for example, in end2end tests's TestNoService).
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
|
||||
h := ht.rw.Header()
|
||||
h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
|
||||
if m := st.Message(); m != "" {
|
||||
h.Set("Grpc-Message", encodeGrpcMessage(m))
|
||||
}
|
||||
|
||||
if p := st.Proto(); p != nil && len(p.Details) > 0 {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
|
||||
}
|
||||
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
// http2 ResponseWriter mechanism to send undeclared Trailers after
|
||||
// the headers have possibly been written.
|
||||
h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
}
|
||||
close(ht.writes)
|
||||
}
|
||||
ht.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", ht.contentType)
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
// Trailers per the net/http.ResponseWriter contract.
|
||||
// See https://golang.org/pkg/net/http/#ResponseWriter
|
||||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
h.Add("Trailer", "Grpc-Status-Details-Bin")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.rw.Write(hdr)
|
||||
ht.rw.Write(data)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v = encodeMetadataHeader(k, v)
|
||||
h.Add(k, v)
|
||||
}
|
||||
}
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
ctx := contextFromRequest(ht.req)
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
}
|
||||
|
||||
// requestOver is closed when either the request's context is done
|
||||
// or the status has been written via WriteStatus.
|
||||
requestOver := make(chan struct{})
|
||||
|
||||
// clientGone receives a single value if peer is gone, either
|
||||
// because the underlying connection is dead or because the
|
||||
// peer sends an http2 RST_STREAM.
|
||||
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
|
||||
go func() {
|
||||
select {
|
||||
case <-requestOver:
|
||||
case <-ht.closedCh:
|
||||
case <-clientGone:
|
||||
}
|
||||
cancel()
|
||||
ht.Close()
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
requestRead: func(int) {},
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
contentSubtype: ht.contentSubtype,
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
|
||||
windowHandler: func(int) {},
|
||||
}
|
||||
|
||||
// readerDone is closed when the Body.Read-ing goroutine exits.
|
||||
readerDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readerDone)
|
||||
|
||||
// TODO: minimize garbage, optimize recvBuffer code/ownership
|
||||
const readSize = 8196
|
||||
for buf := make([]byte, readSize); ; {
|
||||
n, err := req.Body.Read(buf)
|
||||
if n > 0 {
|
||||
s.buf.put(recvMsg{data: buf[:n:n]})
|
||||
buf = buf[n:]
|
||||
}
|
||||
if err != nil {
|
||||
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
|
||||
return
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
buf = make([]byte, readSize)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// startStream is provided by the *grpc.Server's serveStreams.
|
||||
// It starts a goroutine serving s and exits immediately.
|
||||
// The goroutine that is started is the one that then calls
|
||||
// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
|
||||
startStream(s)
|
||||
|
||||
ht.runStream()
|
||||
close(requestOver)
|
||||
|
||||
// Wait for reading goroutine to finish.
|
||||
req.Body.Close()
|
||||
<-readerDone
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) runStream() {
|
||||
for {
|
||||
select {
|
||||
case fn, ok := <-ht.writes:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
fn()
|
||||
case <-ht.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
// mapRecvMsgError maps the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, it can only be one of:
|
||||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * an error from the status package
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return status.Error(code, se.Error())
|
||||
}
|
||||
}
|
||||
if strings.Contains(err.Error(), "body closed by handler") {
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return connectionErrorf(true, err, err.Error())
|
||||
}
|
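The handler-based transport above is reached through (*grpc.Server).ServeHTTP rather than a raw listener. A minimal sketch, assuming placeholder certificate files and services registered elsewhere, of serving gRPC through the standard net/http stack so that NewServerHandlerTransport is exercised:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer() // register services on grpcServer here

	mux := http.NewServeMux()
	mux.Handle("/", grpcServer) // *grpc.Server implements http.Handler

	// ListenAndServeTLS enables HTTP/2 in net/http, which ServeHTTP and
	// NewServerHandlerTransport require. "cert.pem"/"key.pem" are placeholders.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux))
}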
481
vendor/google.golang.org/grpc/internal/transport/handler_server_test.go
generated
vendored
Normal file
@ -0,0 +1,481 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dpb "github.com/golang/protobuf/ptypes/duration"
|
||||
"golang.org/x/net/context"
|
||||
epb "google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
req *http.Request
|
||||
wantErr string
|
||||
modrw func(http.ResponseWriter) http.ResponseWriter
|
||||
check func(*serverHandlerTransport, *testCase) error
|
||||
}
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "http/1.1",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
},
|
||||
wantErr: "gRPC requires HTTP/2",
|
||||
},
|
||||
{
|
||||
name: "bad method",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "GET",
|
||||
Header: http.Header{},
|
||||
RequestURI: "/",
|
||||
},
|
||||
wantErr: "invalid gRPC request method",
|
||||
},
|
||||
{
|
||||
name: "bad content type",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/foo"},
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
wantErr: "invalid gRPC request content-type",
|
||||
},
|
||||
{
|
||||
name: "not flusher",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/grpc"},
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
modrw: func(w http.ResponseWriter) http.ResponseWriter {
|
||||
// Return w without its Flush method
|
||||
type onlyCloseNotifier interface {
|
||||
http.ResponseWriter
|
||||
http.CloseNotifier
|
||||
}
|
||||
return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)}
|
||||
},
|
||||
wantErr: "gRPC requires a ResponseWriter supporting http.Flusher",
|
||||
},
|
||||
{
|
||||
name: "not closenotifier",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/grpc"},
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
modrw: func(w http.ResponseWriter) http.ResponseWriter {
|
||||
// Return w without its CloseNotify method
|
||||
type onlyFlusher interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
}
|
||||
return struct{ onlyFlusher }{w.(onlyFlusher)}
|
||||
},
|
||||
wantErr: "gRPC requires a ResponseWriter supporting http.CloseNotifier",
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/grpc"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
check: func(t *serverHandlerTransport, tt *testCase) error {
|
||||
if t.req != tt.req {
|
||||
return fmt.Errorf("t.req = %p; want %p", t.req, tt.req)
|
||||
}
|
||||
if t.rw == nil {
|
||||
return errors.New("t.rw = nil; want non-nil")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with timeout",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": []string{"application/grpc"},
|
||||
"Grpc-Timeout": {"200m"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
check: func(t *serverHandlerTransport, tt *testCase) error {
|
||||
if !t.timeoutSet {
|
||||
return errors.New("timeout not set")
|
||||
}
|
||||
if want := 200 * time.Millisecond; t.timeout != want {
|
||||
return fmt.Errorf("timeout = %v; want %v", t.timeout, want)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with bad timeout",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": []string{"application/grpc"},
|
||||
"Grpc-Timeout": {"tomorrow"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
wantErr: `rpc error: code = Internal desc = malformed time-out: transport: timeout unit is not recognized: "tomorrow"`,
|
||||
},
|
||||
{
|
||||
name: "with metadata",
|
||||
req: &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": []string{"application/grpc"},
|
||||
"meta-foo": {"foo-val"},
|
||||
"meta-bar": {"bar-val1", "bar-val2"},
|
||||
"user-agent": {"x/y a/b"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
},
|
||||
check: func(ht *serverHandlerTransport, tt *testCase) error {
|
||||
want := metadata.MD{
|
||||
"meta-bar": {"bar-val1", "bar-val2"},
|
||||
"user-agent": {"x/y a/b"},
|
||||
"meta-foo": {"foo-val"},
|
||||
"content-type": {"application/grpc"},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ht.headerMD, want) {
|
||||
return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
rw := newTestHandlerResponseWriter()
|
||||
if tt.modrw != nil {
|
||||
rw = tt.modrw(rw)
|
||||
}
|
||||
got, gotErr := NewServerHandlerTransport(rw, tt.req, nil)
|
||||
if (gotErr != nil) != (tt.wantErr != "") || (gotErr != nil && gotErr.Error() != tt.wantErr) {
|
||||
t.Errorf("%s: error = %q; want %q", tt.name, gotErr.Error(), tt.wantErr)
|
||||
continue
|
||||
}
|
||||
if gotErr != nil {
|
||||
continue
|
||||
}
|
||||
if tt.check != nil {
|
||||
if err := tt.check(got.(*serverHandlerTransport), &tt); err != nil {
|
||||
t.Errorf("%s: %v", tt.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testHandlerResponseWriter struct {
|
||||
*httptest.ResponseRecorder
|
||||
closeNotify chan bool
|
||||
}
|
||||
|
||||
func (w testHandlerResponseWriter) CloseNotify() <-chan bool { return w.closeNotify }
|
||||
func (w testHandlerResponseWriter) Flush() {}
|
||||
|
||||
func newTestHandlerResponseWriter() http.ResponseWriter {
|
||||
return testHandlerResponseWriter{
|
||||
ResponseRecorder: httptest.NewRecorder(),
|
||||
closeNotify: make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
type handleStreamTest struct {
|
||||
t *testing.T
|
||||
bodyw *io.PipeWriter
|
||||
rw testHandlerResponseWriter
|
||||
ht *serverHandlerTransport
|
||||
}
|
||||
|
||||
func newHandleStreamTest(t *testing.T) *handleStreamTest {
|
||||
bodyr, bodyw := io.Pipe()
|
||||
req := &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/grpc"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
Body: bodyr,
|
||||
}
|
||||
rw := newTestHandlerResponseWriter().(testHandlerResponseWriter)
|
||||
ht, err := NewServerHandlerTransport(rw, req, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return &handleStreamTest{
|
||||
t: t,
|
||||
bodyw: bodyw,
|
||||
ht: ht.(*serverHandlerTransport),
|
||||
rw: rw,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlerTransport_HandleStreams(t *testing.T) {
|
||||
st := newHandleStreamTest(t)
|
||||
handleStream := func(s *Stream) {
|
||||
if want := "/service/foo.bar"; s.method != want {
|
||||
t.Errorf("stream method = %q; want %q", s.method, want)
|
||||
}
|
||||
st.bodyw.Close() // no body
|
||||
st.ht.WriteStatus(s, status.New(codes.OK, ""))
|
||||
}
|
||||
st.ht.HandleStreams(
|
||||
func(s *Stream) { go handleStream(s) },
|
||||
func(ctx context.Context, method string) context.Context { return ctx },
|
||||
)
|
||||
wantHeader := http.Header{
|
||||
"Date": nil,
|
||||
"Content-Type": {"application/grpc"},
|
||||
"Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"},
|
||||
"Grpc-Status": {"0"},
|
||||
}
|
||||
if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) {
|
||||
t.Errorf("Header+Trailer Map: %#v; want %#v", st.rw.HeaderMap, wantHeader)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that codes.Unimplemented will close the body, per comment in handler_server.go.
|
||||
func TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) {
|
||||
handleStreamCloseBodyTest(t, codes.Unimplemented, "thingy is unimplemented")
|
||||
}
|
||||
|
||||
// Tests that codes.InvalidArgument will close the body, per comment in handler_server.go.
|
||||
func TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) {
|
||||
handleStreamCloseBodyTest(t, codes.InvalidArgument, "bad arg")
|
||||
}
|
||||
|
||||
func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) {
|
||||
st := newHandleStreamTest(t)
|
||||
|
||||
handleStream := func(s *Stream) {
|
||||
st.ht.WriteStatus(s, status.New(statusCode, msg))
|
||||
}
|
||||
st.ht.HandleStreams(
|
||||
func(s *Stream) { go handleStream(s) },
|
||||
func(ctx context.Context, method string) context.Context { return ctx },
|
||||
)
|
||||
wantHeader := http.Header{
|
||||
"Date": nil,
|
||||
"Content-Type": {"application/grpc"},
|
||||
"Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"},
|
||||
"Grpc-Status": {fmt.Sprint(uint32(statusCode))},
|
||||
"Grpc-Message": {encodeGrpcMessage(msg)},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) {
|
||||
t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", st.rw.HeaderMap, wantHeader)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) {
|
||||
bodyr, bodyw := io.Pipe()
|
||||
req := &http.Request{
|
||||
ProtoMajor: 2,
|
||||
Method: "POST",
|
||||
Header: http.Header{
|
||||
"Content-Type": {"application/grpc"},
|
||||
"Grpc-Timeout": {"200m"},
|
||||
},
|
||||
URL: &url.URL{
|
||||
Path: "/service/foo.bar",
|
||||
},
|
||||
RequestURI: "/service/foo.bar",
|
||||
Body: bodyr,
|
||||
}
|
||||
rw := newTestHandlerResponseWriter().(testHandlerResponseWriter)
|
||||
ht, err := NewServerHandlerTransport(rw, req, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
runStream := func(s *Stream) {
|
||||
defer bodyw.Close()
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("timeout waiting for ctx.Done")
|
||||
return
|
||||
}
|
||||
err := s.ctx.Err()
|
||||
if err != context.DeadlineExceeded {
|
||||
t.Errorf("ctx.Err = %v; want %v", err, context.DeadlineExceeded)
|
||||
return
|
||||
}
|
||||
ht.WriteStatus(s, status.New(codes.DeadlineExceeded, "too slow"))
|
||||
}
|
||||
ht.HandleStreams(
|
||||
func(s *Stream) { go runStream(s) },
|
||||
func(ctx context.Context, method string) context.Context { return ctx },
|
||||
)
|
||||
wantHeader := http.Header{
|
||||
"Date": nil,
|
||||
"Content-Type": {"application/grpc"},
|
||||
"Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"},
|
||||
"Grpc-Status": {"4"},
|
||||
"Grpc-Message": {encodeGrpcMessage("too slow")},
|
||||
}
|
||||
if !reflect.DeepEqual(rw.HeaderMap, wantHeader) {
|
||||
t.Errorf("Header+Trailer Map mismatch.\n got: %#v\nwant: %#v", rw.HeaderMap, wantHeader)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHandlerTransport_HandleStreams_MultiWriteStatus ensures that
|
||||
// concurrent "WriteStatus"s do not panic writing to closed "writes" channel.
|
||||
func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {
|
||||
testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) {
|
||||
if want := "/service/foo.bar"; s.method != want {
|
||||
t.Errorf("stream method = %q; want %q", s.method, want)
|
||||
}
|
||||
st.bodyw.Close() // no body
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(5)
|
||||
for i := 0; i < 5; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
st.ht.WriteStatus(s, status.New(codes.OK, ""))
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
// TestHandlerTransport_HandleStreams_WriteStatusWrite ensures that "Write"
|
||||
// following "WriteStatus" does not panic writing to closed "writes" channel.
|
||||
func TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) {
|
||||
testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) {
|
||||
if want := "/service/foo.bar"; s.method != want {
|
||||
t.Errorf("stream method = %q; want %q", s.method, want)
|
||||
}
|
||||
st.bodyw.Close() // no body
|
||||
|
||||
st.ht.WriteStatus(s, status.New(codes.OK, ""))
|
||||
st.ht.Write(s, []byte("hdr"), []byte("data"), &Options{})
|
||||
})
|
||||
}
|
||||
|
||||
func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handleStreamTest, s *Stream)) {
|
||||
st := newHandleStreamTest(t)
|
||||
st.ht.HandleStreams(
|
||||
func(s *Stream) { go handleStream(st, s) },
|
||||
func(ctx context.Context, method string) context.Context { return ctx },
|
||||
)
|
||||
}
|
||||
|
||||
func TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) {
|
||||
errDetails := []proto.Message{
|
||||
&epb.RetryInfo{
|
||||
RetryDelay: &dpb.Duration{Seconds: 60},
|
||||
},
|
||||
&epb.ResourceInfo{
|
||||
ResourceType: "foo bar",
|
||||
ResourceName: "service.foo.bar",
|
||||
Owner: "User",
|
||||
},
|
||||
}
|
||||
|
||||
statusCode := codes.ResourceExhausted
|
||||
msg := "you are being throttled"
|
||||
st, err := status.New(statusCode, msg).WithDetails(errDetails...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stBytes, err := proto.Marshal(st.Proto())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hst := newHandleStreamTest(t)
|
||||
handleStream := func(s *Stream) {
|
||||
hst.ht.WriteStatus(s, st)
|
||||
}
|
||||
hst.ht.HandleStreams(
|
||||
func(s *Stream) { go handleStream(s) },
|
||||
func(ctx context.Context, method string) context.Context { return ctx },
|
||||
)
|
||||
wantHeader := http.Header{
|
||||
"Date": nil,
|
||||
"Content-Type": {"application/grpc"},
|
||||
"Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"},
|
||||
"Grpc-Status": {fmt.Sprint(uint32(statusCode))},
|
||||
"Grpc-Message": {encodeGrpcMessage(msg)},
|
||||
"Grpc-Status-Details-Bin": {encodeBinHeader(stBytes)},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(hst.rw.HeaderMap, wantHeader) {
|
||||
t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", hst.rw.HeaderMap, wantHeader)
|
||||
}
|
||||
}
|
1368
vendor/google.golang.org/grpc/internal/transport/http2_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
1180
vendor/google.golang.org/grpc/internal/transport/http2_server.go
generated
vendored
Normal file
File diff suppressed because it is too large
623
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
Normal file
@ -0,0 +1,623 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
// http2MaxFrameLen specifies the max length of an HTTP2 frame.
|
||||
http2MaxFrameLen = 16384 // 16KB frame
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
http2InitHeaderTableSize = 4096
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on its own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
baseContentType = "application/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(http2.ClientPreface)
|
||||
http2ErrConvTab = map[http2.ErrCode]codes.Code{
|
||||
http2.ErrCodeNo: codes.Internal,
|
||||
http2.ErrCodeProtocol: codes.Internal,
|
||||
http2.ErrCodeInternal: codes.Internal,
|
||||
http2.ErrCodeFlowControl: codes.ResourceExhausted,
|
||||
http2.ErrCodeSettingsTimeout: codes.Internal,
|
||||
http2.ErrCodeStreamClosed: codes.Internal,
|
||||
http2.ErrCodeFrameSize: codes.Internal,
|
||||
http2.ErrCodeRefusedStream: codes.Unavailable,
|
||||
http2.ErrCodeCancel: codes.Canceled,
|
||||
http2.ErrCodeCompression: codes.Internal,
|
||||
http2.ErrCodeConnect: codes.Internal,
|
||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||
http2.ErrCodeHTTP11Required: codes.Internal,
|
||||
}
|
||||
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
||||
codes.Internal: http2.ErrCodeInternal,
|
||||
codes.Canceled: http2.ErrCodeCancel,
|
||||
codes.Unavailable: http2.ErrCodeRefusedStream,
|
||||
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
|
||||
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
|
||||
}
|
||||
httpStatusConvTab = map[int]codes.Code{
|
||||
// 400 Bad Request - INTERNAL.
|
||||
http.StatusBadRequest: codes.Internal,
|
||||
// 401 Unauthorized - UNAUTHENTICATED.
|
||||
http.StatusUnauthorized: codes.Unauthenticated,
|
||||
// 403 Forbidden - PERMISSION_DENIED.
|
||||
http.StatusForbidden: codes.PermissionDenied,
|
||||
// 404 Not Found - UNIMPLEMENTED.
|
||||
http.StatusNotFound: codes.Unimplemented,
|
||||
// 429 Too Many Requests - UNAVAILABLE.
|
||||
http.StatusTooManyRequests: codes.Unavailable,
|
||||
// 502 Bad Gateway - UNAVAILABLE.
|
||||
http.StatusBadGateway: codes.Unavailable,
|
||||
// 503 Service Unavailable - UNAVAILABLE.
|
||||
http.StatusServiceUnavailable: codes.Unavailable,
|
||||
// 504 Gateway timeout - UNAVAILABLE.
|
||||
http.StatusGatewayTimeout: codes.Unavailable,
|
||||
}
|
||||
)
|
||||
|
||||
// Records the states during HPACK decoding. Must be reset once the
|
||||
// decoding of the entire header block is finished.
|
||||
type decodeState struct {
|
||||
encoding string
|
||||
// statusGen caches the stream status received from the trailer the server
|
||||
// sent. Client side only. Do not access directly. After all trailers are
|
||||
// parsed, use the status method to retrieve the status.
|
||||
statusGen *status.Status
|
||||
// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
|
||||
// intended for direct access outside of parsing.
|
||||
rawStatusCode *int
|
||||
rawStatusMsg string
|
||||
httpStatus *int
|
||||
// Server side only fields.
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
method string
|
||||
// key-value metadata map from the peer.
|
||||
mdata map[string][]string
|
||||
statsTags []byte
|
||||
statsTrace []byte
|
||||
contentSubtype string
|
||||
// whether decoding on server side or not
|
||||
serverSide bool
|
||||
}
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
// reserved by the gRPC protocol. Any other headers are classified as
|
||||
// user-specified metadata.
|
||||
func isReservedHeader(hdr string) bool {
|
||||
if hdr != "" && hdr[0] == ':' {
|
||||
return true
|
||||
}
|
||||
switch hdr {
|
||||
case "content-type",
|
||||
"user-agent",
|
||||
"grpc-message-type",
|
||||
"grpc-encoding",
|
||||
"grpc-message",
|
||||
"grpc-status",
|
||||
"grpc-timeout",
|
||||
"grpc-status-details-bin",
|
||||
// Intentionally exclude grpc-previous-rpc-attempts and
|
||||
// grpc-retry-pushback-ms, which are "reserved", but their API
|
||||
// intentionally works via metadata.
|
||||
"te":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitelistedHeader checks whether hdr should be propagated into metadata
|
||||
// visible to users, even though it is classified as "reserved", above.
|
||||
func isWhitelistedHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case ":authority", "user-agent":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func contentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func contentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
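// exampleContentSubtypeRoundTrip is a hypothetical sketch (not part of
// upstream gRPC) illustrating the convention implemented above: contentType
// rebuilds "application/grpc+proto" from the subtype "proto", and
// contentSubtype extracts the subtype again.
func exampleContentSubtypeRoundTrip() (string, bool) {
	ct := contentType("proto") // "application/grpc+proto"
	return contentSubtype(ct)  // ("proto", true)
}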
|
||||
|
||||
func (d *decodeState) status() *status.Status {
|
||||
if d.statusGen == nil {
|
||||
// No status-details were provided; generate status using code/msg.
|
||||
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
|
||||
}
|
||||
return d.statusGen
|
||||
}
|
||||
|
||||
const binHdrSuffix = "-bin"
|
||||
|
||||
func encodeBinHeader(v []byte) string {
|
||||
return base64.RawStdEncoding.EncodeToString(v)
|
||||
}
|
||||
|
||||
func decodeBinHeader(v string) ([]byte, error) {
|
||||
if len(v)%4 == 0 {
|
||||
// Input was padded, or padding was not necessary.
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
return base64.RawStdEncoding.DecodeString(v)
|
||||
}
|
||||
|
||||
func encodeMetadataHeader(k, v string) string {
|
||||
if strings.HasSuffix(k, binHdrSuffix) {
|
||||
return encodeBinHeader(([]byte)(v))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func decodeMetadataHeader(k, v string) (string, error) {
|
||||
if strings.HasSuffix(k, binHdrSuffix) {
|
||||
b, err := decodeBinHeader(v)
|
||||
return string(b), err
|
||||
}
|
||||
return v, nil
|
||||
}
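// exampleBinaryMetadata is a hypothetical sketch (not part of upstream gRPC)
// showing how values for "-bin" metadata keys are base64-encoded on the wire
// and decoded back by the helpers above.
func exampleBinaryMetadata() (string, error) {
	wire := encodeMetadataHeader("trace-bin", "\x00\x01\x02") // "AAEC" (unpadded base64)
	return decodeMetadataHeader("trace-bin", wire)            // "\x00\x01\x02", nil
}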
|
||||
|
||||
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
|
||||
// frame.Truncated is set to true when framer detects that the current header
|
||||
// list size hits MaxHeaderListSize limit.
|
||||
if frame.Truncated {
|
||||
return status.Error(codes.Internal, "peer header list size exceeded limit")
|
||||
}
|
||||
for _, hf := range frame.Fields {
|
||||
if err := d.processHeaderField(hf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if d.serverSide {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If grpc status exists, no need to check further.
|
||||
if d.rawStatusCode != nil || d.statusGen != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If grpc status doesn't exist and http status doesn't exist,
|
||||
// then it's a malformed header.
|
||||
if d.httpStatus == nil {
|
||||
return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
|
||||
}
|
||||
|
||||
if *(d.httpStatus) != http.StatusOK {
|
||||
code, ok := httpStatusConvTab[*(d.httpStatus)]
|
||||
if !ok {
|
||||
code = codes.Unknown
|
||||
}
|
||||
return status.Error(code, http.StatusText(*(d.httpStatus)))
|
||||
}
|
||||
|
||||
// gRPC status doesn't exist and http status is OK.
|
||||
// Set rawStatusCode to be unknown and return nil error.
|
||||
// So that, if the stream has ended this Unknown status
|
||||
// will be propagated to the user.
|
||||
// Otherwise, it will be ignored. In which case, status from
|
||||
// a later trailer, that has StreamEnded flag set, is propagated.
|
||||
code := int(codes.Unknown)
|
||||
d.rawStatusCode = &code
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decodeState) addMetadata(k, v string) {
|
||||
if d.mdata == nil {
|
||||
d.mdata = make(map[string][]string)
|
||||
}
|
||||
d.mdata[k] = append(d.mdata[k], v)
|
||||
}
|
||||
|
||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||
switch f.Name {
|
||||
case "content-type":
|
||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||
if !validContentType {
|
||||
return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
|
||||
}
|
||||
d.contentSubtype = contentSubtype
|
||||
// TODO: do we want to propagate the whole content-type in the metadata,
|
||||
// or come up with a way to just propagate the content-subtype if it was set?
|
||||
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
|
||||
// in the metadata?
|
||||
d.addMetadata(f.Name, f.Value)
|
||||
case "grpc-encoding":
|
||||
d.encoding = f.Value
|
||||
case "grpc-status":
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
|
||||
}
|
||||
d.rawStatusCode = &code
|
||||
case "grpc-message":
|
||||
d.rawStatusMsg = decodeGrpcMessage(f.Value)
|
||||
case "grpc-status-details-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
}
|
||||
s := &spb.Status{}
|
||||
if err := proto.Unmarshal(v, s); err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
}
|
||||
d.statusGen = status.FromProto(s)
|
||||
case "grpc-timeout":
|
||||
d.timeoutSet = true
|
||||
var err error
|
||||
if d.timeout, err = decodeTimeout(f.Value); err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
|
||||
}
|
||||
case ":path":
|
||||
d.method = f.Value
|
||||
case ":status":
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
|
||||
}
|
||||
d.httpStatus = &code
|
||||
case "grpc-tags-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
|
||||
}
|
||||
d.statsTags = v
|
||||
d.addMetadata(f.Name, string(v))
|
||||
case "grpc-trace-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
|
||||
}
|
||||
d.statsTrace = v
|
||||
d.addMetadata(f.Name, string(v))
|
||||
default:
|
||||
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||
if err != nil {
|
||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||
return nil
|
||||
}
|
||||
d.addMetadata(f.Name, v)
|
||||
}
|
||||
return nil
|
||||
}
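// exampleDecodeTrailer is a hypothetical sketch (not part of upstream gRPC)
// showing how a decodeState accumulates trailer fields on the client side:
// after grpc-status and grpc-message are processed, status() yields the
// resulting *status.Status.
func exampleDecodeTrailer() *status.Status {
	d := &decodeState{serverSide: false}
	d.processHeaderField(hpack.HeaderField{Name: "grpc-status", Value: "5"})
	d.processHeaderField(hpack.HeaderField{Name: "grpc-message", Value: "not found"})
	return d.status() // code codes.NotFound, message "not found"
}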
|
||||
|
||||
type timeoutUnit uint8
|
||||
|
||||
const (
|
||||
hour timeoutUnit = 'H'
|
||||
minute timeoutUnit = 'M'
|
||||
second timeoutUnit = 'S'
|
||||
millisecond timeoutUnit = 'm'
|
||||
microsecond timeoutUnit = 'u'
|
||||
nanosecond timeoutUnit = 'n'
|
||||
)
|
||||
|
||||
func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case hour:
|
||||
return time.Hour, true
|
||||
case minute:
|
||||
return time.Minute, true
|
||||
case second:
|
||||
return time.Second, true
|
||||
case millisecond:
|
||||
return time.Millisecond, true
|
||||
case microsecond:
|
||||
return time.Microsecond, true
|
||||
case nanosecond:
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const maxTimeoutValue int64 = 100000000 - 1
|
||||
|
||||
// div does integer division and rounds up the result. Note that this is
|
||||
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||
func div(d, r time.Duration) int64 {
|
||||
if m := d % r; m > 0 {
|
||||
return int64(d/r + 1)
|
||||
}
|
||||
return int64(d / r)
|
||||
}
|
||||
|
||||
// TODO(zhaoq): This is simplistic and not bandwidth-efficient. Improve it.
|
||||
func encodeTimeout(t time.Duration) string {
|
||||
if t <= 0 {
|
||||
return "0n"
|
||||
}
|
||||
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "n"
|
||||
}
|
||||
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "u"
|
||||
}
|
||||
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "m"
|
||||
}
|
||||
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "S"
|
||||
}
|
||||
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "M"
|
||||
}
|
||||
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||
}
|
||||
|
||||
func decodeTimeout(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
||||
}
|
||||
if size > 9 {
|
||||
// Spec allows for 8 digits plus the unit.
|
||||
return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
|
||||
}
|
||||
unit := timeoutUnit(s[size-1])
|
||||
d, ok := timeoutUnitToDuration(unit)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
const maxHours = math.MaxInt64 / int64(time.Hour)
|
||||
if d == time.Hour && t > maxHours {
|
||||
// This timeout would overflow math.MaxInt64; clamp it.
|
||||
return time.Duration(math.MaxInt64), nil
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
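// exampleTimeoutHeader is a hypothetical sketch (not part of upstream gRPC)
// showing the grpc-timeout wire format produced and consumed above: 250ms is
// encoded as "250000u" (the finest-grained unit whose value still fits in 8
// digits) and decoded back to 250 * time.Millisecond.
func exampleTimeoutHeader() (time.Duration, error) {
	return decodeTimeout(encodeTimeout(250 * time.Millisecond))
}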
|
||||
|
||||
const (
|
||||
spaceByte = ' '
|
||||
tildeByte = '~'
|
||||
percentByte = '%'
|
||||
)
|
||||
|
||||
// encodeGrpcMessage is used to encode status code in header field
|
||||
// "grpc-message". It does percent encoding and also replaces invalid utf-8
|
||||
// characters with Unicode replacement character.
|
||||
//
|
||||
// It checks to see if each individual byte in msg is an allowable byte, and
|
||||
// then either percent encoding or passing it through. When percent encoding,
|
||||
// the byte is converted into hexadecimal notation with a '%' prepended.
|
||||
func encodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
|
||||
return encodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
for len(msg) > 0 {
|
||||
r, size := utf8.DecodeRuneInString(msg)
|
||||
for _, b := range []byte(string(r)) {
|
||||
if size > 1 {
|
||||
// If size > 1, r is not ascii. Always do percent encoding.
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
continue
|
||||
}
|
||||
|
||||
// The for loop is necessary even if size == 1. r could be
|
||||
// utf8.RuneError.
|
||||
//
|
||||
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||
buf.WriteByte(b)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
}
|
||||
}
|
||||
msg = msg[size:]
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
|
||||
func decodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
if msg[i] == percentByte && i+2 < lenMsg {
|
||||
return decodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func decodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c == percentByte && i+2 < lenMsg {
|
||||
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
|
||||
if err != nil {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteByte(byte(parsed))
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
buf.WriteByte(c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
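// exampleGrpcMessageRoundTrip is a hypothetical sketch (not part of upstream
// gRPC) showing the percent-encoding round trip performed above: '%' and
// non-ASCII bytes in a status message are escaped as %XX for the grpc-message
// header and restored on decode.
func exampleGrpcMessageRoundTrip() string {
	encoded := encodeGrpcMessage("quota exceeded: 系统") // non-ASCII bytes become %XX escapes
	return decodeGrpcMessage(encoded) // "quota exceeded: 系统"
}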
|
||||
|
||||
type bufWriter struct {
|
||||
buf []byte
|
||||
offset int
|
||||
batchSize int
|
||||
conn net.Conn
|
||||
err error
|
||||
|
||||
onFlush func()
|
||||
}
|
||||
|
||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||
return &bufWriter{
|
||||
buf: make([]byte, batchSize*2),
|
||||
batchSize: batchSize,
|
||||
conn: conn,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
if w.batchSize == 0 { // Buffer has been disabled.
|
||||
return w.conn.Write(b)
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
b = b[nn:]
|
||||
w.offset += nn
|
||||
n += nn
|
||||
if w.offset >= w.batchSize {
|
||||
err = w.Flush()
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bufWriter) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if w.offset == 0 {
|
||||
return nil
|
||||
}
|
||||
if w.onFlush != nil {
|
||||
w.onFlush()
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
|
||||
|
||||
type framer struct {
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
||||
if writeBufferSize < 0 {
|
||||
writeBufferSize = 0
|
||||
}
|
||||
var r io.Reader = conn
|
||||
if readBufferSize > 0 {
|
||||
r = bufio.NewReaderSize(r, readBufferSize)
|
||||
}
|
||||
w := newBufWriter(conn, writeBufferSize)
|
||||
f := &framer{
|
||||
writer: w,
|
||||
fr: http2.NewFramer(w, r),
|
||||
}
|
||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||
f.fr.SetReuseFrames()
|
||||
f.fr.MaxHeaderListSize = maxHeaderListSize
|
||||
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
||||
return f
|
||||
}
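// exampleNewFramer is a hypothetical sketch (not part of upstream gRPC)
// showing how the HTTP/2 transports construct their framer over an established
// connection: writes are batched by bufWriter until Flush, and reads go
// through a bufio.Reader of the requested size.
func exampleNewFramer(conn net.Conn) *framer {
	const writeBufferSize, readBufferSize = 32 * 1024, 32 * 1024
	const maxHeaderListSize = 8192 // placeholder limit for decoded header bytes
	return newFramer(conn, writeBufferSize, readBufferSize, maxHeaderListSize)
}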
|
237
vendor/google.golang.org/grpc/internal/transport/http_util_test.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTimeoutEncode(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"12345678ns", "12345678n"},
|
||||
{"123456789ns", "123457u"},
|
||||
{"12345678us", "12345678u"},
|
||||
{"123456789us", "123457m"},
|
||||
{"12345678ms", "12345678m"},
|
||||
{"123456789ms", "123457S"},
|
||||
{"12345678s", "12345678S"},
|
||||
{"123456789s", "2057614M"},
|
||||
{"12345678m", "12345678M"},
|
||||
{"123456789m", "2057614H"},
|
||||
} {
|
||||
d, err := time.ParseDuration(test.in)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse duration string %s: %v", test.in, err)
|
||||
}
|
||||
out := encodeTimeout(d)
|
||||
if out != test.out {
|
||||
t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeoutDecode(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
// input
|
||||
s string
|
||||
// output
|
||||
d time.Duration
|
||||
err error
|
||||
}{
|
||||
{"1234S", time.Second * 1234, nil},
|
||||
{"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")},
|
||||
{"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")},
|
||||
{"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")},
|
||||
} {
|
||||
d, err := decodeTimeout(test.s)
|
||||
if d != test.d || fmt.Sprint(err) != fmt.Sprint(test.err) {
|
||||
t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContentSubtype(t *testing.T) {
|
||||
tests := []struct {
|
||||
contentType string
|
||||
want string
|
||||
wantValid bool
|
||||
}{
|
||||
{"application/grpc", "", true},
|
||||
{"application/grpc+", "", true},
|
||||
{"application/grpc+blah", "blah", true},
|
||||
{"application/grpc;", "", true},
|
||||
{"application/grpc;blah", "blah", true},
|
||||
{"application/grpcd", "", false},
|
||||
{"application/grpd", "", false},
|
||||
{"application/grp", "", false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got, gotValid := contentSubtype(tt.contentType)
|
||||
if got != tt.want || gotValid != tt.wantValid {
|
||||
t.Errorf("contentSubtype(%q) = (%v, %v); want (%v, %v)", tt.contentType, got, gotValid, tt.want, tt.wantValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeGrpcMessage(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"", ""},
|
||||
{"Hello", "Hello"},
|
||||
{"\u0000", "%00"},
|
||||
{"%", "%25"},
|
||||
{"系统", "%E7%B3%BB%E7%BB%9F"},
|
||||
{string([]byte{0xff, 0xfe, 0xfd}), "%EF%BF%BD%EF%BF%BD%EF%BF%BD"},
|
||||
} {
|
||||
actual := encodeGrpcMessage(tt.input)
|
||||
if tt.expected != actual {
|
||||
t.Errorf("encodeGrpcMessage(%q) = %q, want %q", tt.input, actual, tt.expected)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure that all the visible ASCII chars except '%' are not percent encoded.
|
||||
for i := ' '; i <= '~' && i != '%'; i++ {
|
||||
output := encodeGrpcMessage(string(i))
|
||||
if output != string(i) {
|
||||
t.Errorf("encodeGrpcMessage(%v) = %v, want %v", string(i), output, string(i))
|
||||
}
|
||||
}
|
||||
|
||||
// make sure that all the invisible ASCII chars and '%' are percent encoded.
|
||||
for i := rune(0); i == '%' || (i >= rune(0) && i < ' ') || (i > '~' && i <= rune(127)); i++ {
|
||||
output := encodeGrpcMessage(string(i))
|
||||
expected := fmt.Sprintf("%%%02X", i)
|
||||
if output != expected {
|
||||
t.Errorf("encodeGrpcMessage(%v) = %v, want %v", string(i), output, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeGrpcMessage(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"", ""},
|
||||
{"Hello", "Hello"},
|
||||
{"H%61o", "Hao"},
|
||||
{"H%6", "H%6"},
|
||||
{"%G0", "%G0"},
|
||||
{"%E7%B3%BB%E7%BB%9F", "系统"},
|
||||
{"%EF%BF%BD", "<22>"},
|
||||
} {
|
||||
actual := decodeGrpcMessage(tt.input)
|
||||
if tt.expected != actual {
|
||||
t.Errorf("decodeGrpcMessage(%q) = %q, want %q", tt.input, actual, tt.expected)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure that all the visible ASCII chars except '%' are not percent decoded.
|
||||
for i := ' '; i <= '~' && i != '%'; i++ {
|
||||
output := decodeGrpcMessage(string(i))
|
||||
if output != string(i) {
|
||||
t.Errorf("decodeGrpcMessage(%v) = %v, want %v", string(i), output, string(i))
|
||||
}
|
||||
}
|
||||
|
||||
// make sure that all the invisible ASCII chars and '%' are percent decoded.
|
||||
for i := rune(0); i == '%' || (i >= rune(0) && i < ' ') || (i > '~' && i <= rune(127)); i++ {
|
||||
output := decodeGrpcMessage(fmt.Sprintf("%%%02X", i))
|
||||
if output != string(i) {
|
||||
t.Errorf("decodeGrpcMessage(%v) = %v, want %v", fmt.Sprintf("%%%02X", i), output, string(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decode an encoded string should get the same thing back, except for invalid
|
||||
// utf8 chars.
|
||||
func TestDecodeEncodeGrpcMessage(t *testing.T) {
|
||||
testCases := []struct {
|
||||
orig string
|
||||
want string
|
||||
}{
|
||||
{"", ""},
|
||||
{"hello", "hello"},
|
||||
{"h%6", "h%6"},
|
||||
{"%G0", "%G0"},
|
||||
{"系统", "系统"},
|
||||
{"Hello, 世界", "Hello, 世界"},
|
||||
|
||||
{string([]byte{0xff, 0xfe, 0xfd}), "���"},
|
||||
{string([]byte{0xff}) + "Hello" + string([]byte{0xfe}) + "世界" + string([]byte{0xfd}), "�Hello�世界�"},
|
||||
}
|
||||
for _, tC := range testCases {
|
||||
got := decodeGrpcMessage(encodeGrpcMessage(tC.orig))
|
||||
if got != tC.want {
|
||||
t.Errorf("decodeGrpcMessage(encodeGrpcMessage(%q)) = %q, want %q", tC.orig, got, tC.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const binaryValue = string(128)
|
||||
|
||||
func TestEncodeMetadataHeader(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
// input
|
||||
kin string
|
||||
vin string
|
||||
// output
|
||||
vout string
|
||||
}{
|
||||
{"key", "abc", "abc"},
|
||||
{"KEY", "abc", "abc"},
|
||||
{"key-bin", "abc", "YWJj"},
|
||||
{"key-bin", binaryValue, "woA"},
|
||||
} {
|
||||
v := encodeMetadataHeader(test.kin, test.vin)
|
||||
if !reflect.DeepEqual(v, test.vout) {
|
||||
t.Fatalf("encodeMetadataHeader(%q, %q) = %q, want %q", test.kin, test.vin, v, test.vout)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeMetadataHeader(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
// input
|
||||
kin string
|
||||
vin string
|
||||
// output
|
||||
vout string
|
||||
err error
|
||||
}{
|
||||
{"a", "abc", "abc", nil},
|
||||
{"key-bin", "Zm9vAGJhcg==", "foo\x00bar", nil},
|
||||
{"key-bin", "Zm9vAGJhcg", "foo\x00bar", nil},
|
||||
{"key-bin", "woA=", binaryValue, nil},
|
||||
{"a", "abc,efg", "abc,efg", nil},
|
||||
} {
|
||||
v, err := decodeMetadataHeader(test.kin, test.vin)
|
||||
if !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) {
|
||||
t.Fatalf("decodeMetadataHeader(%q, %q) = %q, %v, want %q, %v", test.kin, test.vin, v, err, test.vout, test.err)
|
||||
}
|
||||
}
|
||||
}
|
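The encodeGrpcMessage/decodeGrpcMessage tests above pin down the grpc-message percent-encoding rule: visible ASCII characters other than '%' pass through unchanged, and every other rune (including U+FFFD produced from invalid UTF-8 input) is emitted as %XX for each of its UTF-8 bytes. Below is a minimal standalone sketch of that rule; it is not the vendored implementation, and percentEncodeGrpcMessage is an illustrative name.

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// percentEncodeGrpcMessage applies the encoding rule exercised by the tests
// above: runes in the visible ASCII range (' '..'~') other than '%' are
// copied as-is; every other rune (including U+FFFD from invalid UTF-8) is
// emitted as %XX per UTF-8 byte.
func percentEncodeGrpcMessage(msg string) string {
	var b strings.Builder
	for _, r := range msg { // invalid bytes decode to utf8.RuneError here
		if r >= ' ' && r <= '~' && r != '%' {
			b.WriteRune(r)
			continue
		}
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		for _, c := range buf[:n] {
			fmt.Fprintf(&b, "%%%02X", c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(percentEncodeGrpcMessage("Hello")) // Hello
	fmt.Println(percentEncodeGrpcMessage("%"))     // %25
	fmt.Println(percentEncodeGrpcMessage("系统"))  // %E7%B3%BB%E7%BB%9F
}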
44
vendor/google.golang.org/grpc/internal/transport/log.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file contains wrappers for grpclog functions.
|
||||
// The transport package only logs to verbose level 2 by default.
|
||||
|
||||
package transport
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
const logLevel = 2
|
||||
|
||||
func infof(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Infof(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func warningf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Warningf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Errorf(format, args...)
|
||||
}
|
||||
}
|
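The wrappers above gate all transport logging behind grpclog verbosity level 2, so the messages only appear when the process runs with sufficient verbosity (with the default logger, GRPC_GO_LOG_VERBOSITY_LEVEL=2 or higher). The following is a small sketch of the same gating pattern in application code, using only the public grpclog API; debugf and the message are illustrative, not part of the vendored package.

package main

import "google.golang.org/grpc/grpclog"

// verboseLogLevel mirrors the logLevel constant in log.go above.
const verboseLogLevel = 2

// debugf only emits the message when the configured grpclog verbosity is at
// least verboseLogLevel, which is the same pattern infof/warningf/errorf use.
func debugf(format string, args ...interface{}) {
	if grpclog.V(verboseLogLevel) {
		grpclog.Infof(format, args...)
	}
}

func main() {
	// With the default logger this prints only when
	// GRPC_GO_LOG_VERBOSITY_LEVEL is set to 2 or higher.
	debugf("transport sketch: opened stream %d", 7)
}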
712
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
Normal file
@ -0,0 +1,712 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package transport defines and implements message oriented communication
|
||||
// channel to complete various transactions (e.g., an RPC). It is meant for
|
||||
// grpc-internal usage and is not intended to be imported directly by users.
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/tap"
|
||||
)
|
||||
|
||||
// recvMsg represents the received msg from the transport. All transport
|
||||
// protocol specific info has been removed.
|
||||
type recvMsg struct {
|
||||
data []byte
|
||||
// nil: received some data
|
||||
// io.EOF: stream is completed. data is nil.
|
||||
// other non-nil error: transport failure. data is nil.
|
||||
err error
|
||||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of recvMsg structs.
|
||||
// Note that recvBuffer differs from controlBuffer only in that recvBuffer
|
||||
// holds a channel of recvMsg structs instead of objects implementing the "item" interface.
|
||||
// recvBuffer is written to much more often than controlBuffer, and using
|
||||
// concrete recvMsg structs helps avoid allocation in "recvBuffer.put".
|
||||
type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
backlog []recvMsg
|
||||
err error
|
||||
}
|
||||
|
||||
func newRecvBuffer() *recvBuffer {
|
||||
b := &recvBuffer{
|
||||
c: make(chan recvMsg, 1),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *recvBuffer) put(r recvMsg) {
|
||||
b.mu.Lock()
|
||||
if b.err != nil {
|
||||
b.mu.Unlock()
|
||||
// An error had occurred earlier, don't accept more
|
||||
// data or errors.
|
||||
return
|
||||
}
|
||||
b.err = r.err
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *recvBuffer) load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = recvMsg{}
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// get returns the channel that receives a recvMsg in the buffer.
|
||||
//
|
||||
// Upon receipt of a recvMsg, the caller should call load to send another
|
||||
// recvMsg onto the channel if there is any.
|
||||
func (b *recvBuffer) get() <-chan recvMsg {
|
||||
return b.c
|
||||
}
|
||||
|
||||
//
|
||||
// recvBufferReader implements io.Reader interface to read the data from
|
||||
// recvBuffer.
|
||||
type recvBufferReader struct {
|
||||
ctx context.Context
|
||||
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
||||
recv *recvBuffer
|
||||
last []byte // Stores the remaining data in the previous calls.
|
||||
err error
|
||||
}
|
||||
|
||||
// Read reads the next len(p) bytes from last. If last is drained, it tries to
|
||||
// read additional data from recv. It blocks if there is no additional data available
|
||||
// in recv. If Read returns any non-nil error, it will continue to return that error.
|
||||
func (r *recvBufferReader) Read(p []byte) (n int, err error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
n, r.err = r.read(p)
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
||||
if r.last != nil && len(r.last) > 0 {
|
||||
// Read remaining data left in last call.
|
||||
copied := copy(p, r.last)
|
||||
r.last = r.last[copied:]
|
||||
return copied, nil
|
||||
}
|
||||
select {
|
||||
case <-r.ctxDone:
|
||||
return 0, ContextErr(r.ctx.Err())
|
||||
case m := <-r.recv.get():
|
||||
r.recv.load()
|
||||
if m.err != nil {
|
||||
return 0, m.err
|
||||
}
|
||||
copied := copy(p, m.data)
|
||||
r.last = m.data[copied:]
|
||||
return copied, nil
|
||||
}
|
||||
}
|
||||
|
||||
type streamState uint32
|
||||
|
||||
const (
|
||||
streamActive streamState = iota
|
||||
streamWriteDone // EndStream sent
|
||||
streamReadDone // EndStream received
|
||||
streamDone // the entire stream is finished.
|
||||
)
|
||||
|
||||
// Stream represents an RPC in the transport layer.
|
||||
type Stream struct {
|
||||
id uint32
|
||||
st ServerTransport // nil for client side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
|
||||
method string // the associated RPC method of the stream
|
||||
recvCompress string
|
||||
sendCompress string
|
||||
buf *recvBuffer
|
||||
trReader io.Reader
|
||||
fc *inFlow
|
||||
wq *writeQuota
|
||||
|
||||
// Callback to signal the application's intention to read data. This
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
header metadata.MD // the received header metadata.
|
||||
trailer metadata.MD // the key-value map of trailer metadata.
|
||||
|
||||
noHeaders bool // set if the client never received headers (set only after the stream is done).
|
||||
|
||||
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
||||
headerSent uint32
|
||||
|
||||
state streamState
|
||||
|
||||
// On client-side it is the status error received from the server.
|
||||
// On server-side it is unused.
|
||||
status *status.Status
|
||||
|
||||
bytesReceived uint32 // indicates whether any bytes have been received on this stream
|
||||
unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
|
||||
|
||||
// contentSubtype is the content-subtype for requests.
|
||||
// this must be lowercase or the behavior is undefined.
|
||||
contentSubtype string
|
||||
}
|
||||
|
||||
// isHeaderSent is only valid on the server-side.
|
||||
func (s *Stream) isHeaderSent() bool {
|
||||
return atomic.LoadUint32(&s.headerSent) == 1
|
||||
}
|
||||
|
||||
// updateHeaderSent updates headerSent and returns true
|
||||
// if it was already set. It is valid only on the server side.
|
||||
func (s *Stream) updateHeaderSent() bool {
|
||||
return atomic.SwapUint32(&s.headerSent, 1) == 1
|
||||
}
|
||||
|
||||
func (s *Stream) swapState(st streamState) streamState {
|
||||
return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
|
||||
}
|
||||
|
||||
func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
|
||||
return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
|
||||
}
|
||||
|
||||
func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
func (s *Stream) waitOnHeader() error {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return ContextErr(s.ctx.Err())
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is an empty string if no compression is applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
// SetSendCompress sets the compression algorithm to the stream.
|
||||
func (s *Stream) SetSendCompress(str string) {
|
||||
s.sendCompress = str
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
// from the server.
|
||||
func (s *Stream) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
||||
|
||||
// Header acquires the key-value pairs of header metadata once it
|
||||
// is available. It blocks until i) the metadata is ready or ii) there is no
|
||||
// header metadata or iii) the stream is canceled/expired.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true, nil. If a context error happens
|
||||
// first, returns it as a status error. Client-side only.
|
||||
func (s *Stream) TrailersOnly() (bool, error) {
|
||||
err := s.waitOnHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// if !headerDone, some other connection error occurred.
|
||||
return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||
// after the entire stream is done, it could return an empty MD. Client
|
||||
// side only.
|
||||
// It can be safely read only after the stream has ended, that is, after either
|
||||
// read or write has returned io.EOF.
|
||||
func (s *Stream) Trailer() metadata.MD {
|
||||
c := s.trailer.Copy()
|
||||
return c
|
||||
}
|
||||
|
||||
// ContentSubtype returns the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". This will always be lowercase. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
func (s *Stream) ContentSubtype() string {
|
||||
return s.contentSubtype
|
||||
}
|
||||
|
||||
// Context returns the context of the stream.
|
||||
func (s *Stream) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *Stream) Method() string {
|
||||
return s.method
|
||||
}
|
||||
|
||||
// Status returns the status received from the server.
|
||||
// Status can be read safely only after the stream has ended,
|
||||
// that is, after Done() is closed.
|
||||
func (s *Stream) Status() *status.Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata. This can be called multiple times.
|
||||
// Server side only.
|
||||
// This should not be called in parallel to other data writes.
|
||||
func (s *Stream) SetHeader(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.isHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.header = metadata.Join(s.header, md)
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendHeader sends the given header metadata. The given metadata is
|
||||
// combined with any metadata set by previous calls to SetHeader and
|
||||
// then written to the transport stream.
|
||||
func (s *Stream) SendHeader(md metadata.MD) error {
|
||||
return s.st.WriteHeader(s, md)
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
||||
// by the server. This can be called multiple times. Server side only.
|
||||
// This should not be called parallel to other data writes.
|
||||
func (s *Stream) SetTrailer(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.trailer = metadata.Join(s.trailer, md)
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Stream) write(m recvMsg) {
|
||||
s.buf.put(m)
|
||||
}
|
||||
|
||||
// Read reads all p bytes from the wire for this stream.
|
||||
func (s *Stream) Read(p []byte) (n int, err error) {
|
||||
// Don't request a read if there was an error earlier
|
||||
if er := s.trReader.(*transportReader).er; er != nil {
|
||||
return 0, er
|
||||
}
|
||||
s.requestRead(len(p))
|
||||
return io.ReadFull(s.trReader, p)
|
||||
}
|
||||
|
||||
// transportReader reads all the data available for this Stream from the transport and
|
||||
// passes them into the decoder, which converts them into a gRPC message stream.
|
||||
// The error is io.EOF when the stream is done or another non-nil error if
|
||||
// the stream broke.
|
||||
type transportReader struct {
|
||||
reader io.Reader
|
||||
// The handler to control the window update procedure for both this
|
||||
// particular stream and the associated transport.
|
||||
windowHandler func(int)
|
||||
er error
|
||||
}
|
||||
|
||||
func (t *transportReader) Read(p []byte) (n int, err error) {
|
||||
n, err = t.reader.Read(p)
|
||||
if err != nil {
|
||||
t.er = err
|
||||
return
|
||||
}
|
||||
t.windowHandler(n)
|
||||
return
|
||||
}
|
||||
|
||||
// BytesReceived indicates whether any bytes have been received on this stream.
|
||||
func (s *Stream) BytesReceived() bool {
|
||||
return atomic.LoadUint32(&s.bytesReceived) == 1
|
||||
}
|
||||
|
||||
// Unprocessed indicates whether the server did not process this stream --
|
||||
// i.e. it sent a refused stream or GOAWAY including this stream ID.
|
||||
func (s *Stream) Unprocessed() bool {
|
||||
return atomic.LoadUint32(&s.unprocessed) == 1
|
||||
}
|
||||
|
||||
// GoString is implemented by Stream so context.String() won't
|
||||
// race when printing %#v.
|
||||
func (s *Stream) GoString() string {
|
||||
return fmt.Sprintf("<stream: %p, %v>", s, s.method)
|
||||
}
|
||||
|
||||
// state of transport
|
||||
type transportState int
|
||||
|
||||
const (
|
||||
reachable transportState = iota
|
||||
closing
|
||||
draining
|
||||
)
|
||||
|
||||
// ServerConfig consists of all the configurations to establish a server transport.
|
||||
type ServerConfig struct {
|
||||
MaxStreams uint32
|
||||
AuthInfo credentials.AuthInfo
|
||||
InTapHandle tap.ServerInHandle
|
||||
StatsHandler stats.Handler
|
||||
KeepaliveParams keepalive.ServerParameters
|
||||
KeepalivePolicy keepalive.EnforcementPolicy
|
||||
InitialWindowSize int32
|
||||
InitialConnWindowSize int32
|
||||
WriteBufferSize int
|
||||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
MaxHeaderListSize *uint32
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport from conn, or returns a
|
||||
// non-nil error if it fails.
|
||||
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
|
||||
return newHTTP2Server(conn, config)
|
||||
}
|
||||
|
||||
// ConnectOptions covers all relevant options for communicating with the server.
|
||||
type ConnectOptions struct {
|
||||
// UserAgent is the application user agent.
|
||||
UserAgent string
|
||||
// Dialer specifies how to dial a network address.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
|
||||
FailOnNonTempDialError bool
|
||||
// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
|
||||
PerRPCCredentials []credentials.PerRPCCredentials
|
||||
// TransportCredentials stores the Authenticator required to setup a client
|
||||
// connection. Only one of TransportCredentials and CredsBundle is non-nil.
|
||||
TransportCredentials credentials.TransportCredentials
|
||||
// CredsBundle is the credentials bundle to be used. Only one of
|
||||
// TransportCredentials and CredsBundle is non-nil.
|
||||
CredsBundle credentials.Bundle
|
||||
// KeepaliveParams stores the keepalive parameters.
|
||||
KeepaliveParams keepalive.ClientParameters
|
||||
// StatsHandler stores the handler for stats.
|
||||
StatsHandler stats.Handler
|
||||
// InitialWindowSize sets the initial window size for a stream.
|
||||
InitialWindowSize int32
|
||||
// InitialConnWindowSize sets the initial window size for a connection.
|
||||
InitialConnWindowSize int32
|
||||
// WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
|
||||
WriteBufferSize int
|
||||
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
||||
ReadBufferSize int
|
||||
// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
|
||||
ChannelzParentID int64
|
||||
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
||||
MaxHeaderListSize *uint32
|
||||
}
|
||||
|
||||
// TargetInfo contains the information of the target such as network address and metadata.
|
||||
type TargetInfo struct {
|
||||
Addr string
|
||||
Metadata interface{}
|
||||
Authority string
|
||||
}
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess, onGoAway, onClose)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
// transmission.
|
||||
type Options struct {
|
||||
// Last indicates whether this write is the last piece for
|
||||
// this stream.
|
||||
Last bool
|
||||
}
|
||||
|
||||
// CallHdr carries the information of a particular RPC.
|
||||
type CallHdr struct {
|
||||
// Host specifies the peer's host.
|
||||
Host string
|
||||
|
||||
// Method specifies the operation to perform.
|
||||
Method string
|
||||
|
||||
// SendCompress specifies the compression algorithm applied on
|
||||
// outbound message.
|
||||
SendCompress string
|
||||
|
||||
// Creds specifies credentials.PerRPCCredentials for a call.
|
||||
Creds credentials.PerRPCCredentials
|
||||
|
||||
// ContentSubtype specifies the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". The value of ContentSubtype must be all
|
||||
// lowercase, otherwise the behavior is undefined. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
ContentSubtype string
|
||||
|
||||
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
|
||||
}
|
||||
|
||||
// ClientTransport is the common interface for all gRPC client-side transport
|
||||
// implementations.
|
||||
type ClientTransport interface {
|
||||
// Close tears down this transport. Once it returns, the transport
|
||||
// should not be accessed any more. The caller must make sure this
|
||||
// is called only once.
|
||||
Close() error
|
||||
|
||||
// GracefulClose starts to tear down the transport. It stops accepting
|
||||
// new RPCs and waits for the completion of the pending RPCs.
|
||||
GracefulClose() error
|
||||
|
||||
// Write sends the data for the given stream. A nil stream indicates
|
||||
// the write is to be performed on the transport as a whole.
|
||||
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
|
||||
|
||||
// NewStream creates a Stream for an RPC.
|
||||
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
|
||||
|
||||
// CloseStream clears the footprint of a stream when the stream is
|
||||
// not needed any more. The err indicates the error incurred when
|
||||
// CloseStream is called. Must be called when a stream is finished
|
||||
// unless the associated transport is closing.
|
||||
CloseStream(stream *Stream, err error)
|
||||
|
||||
// Error returns a channel that is closed when some I/O error
|
||||
// happens. Typically the caller should have a goroutine to monitor
|
||||
// this in order to take action (e.g., close the current transport
|
||||
// and create a new one) in error case. It should not return nil
|
||||
// once the transport is initiated.
|
||||
Error() <-chan struct{}
|
||||
|
||||
// GoAway returns a channel that is closed when ClientTransport
|
||||
// receives the draining signal from the server (e.g., GOAWAY frame in
|
||||
// HTTP/2).
|
||||
GoAway() <-chan struct{}
|
||||
|
||||
// GetGoAwayReason returns the reason why GoAway frame was received.
|
||||
GetGoAwayReason() GoAwayReason
|
||||
|
||||
// IncrMsgSent increments the number of messages sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of messages received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// ServerTransport is the common interface for all gRPC server-side transport
|
||||
// implementations.
|
||||
//
|
||||
// Methods may be called concurrently from multiple goroutines, but
|
||||
// Write methods for a given Stream will be called serially.
|
||||
type ServerTransport interface {
|
||||
// HandleStreams receives incoming streams using the given handler.
|
||||
HandleStreams(func(*Stream), func(context.Context, string) context.Context)
|
||||
|
||||
// WriteHeader sends the header metadata for the given stream.
|
||||
// WriteHeader may not be called on all streams.
|
||||
WriteHeader(s *Stream, md metadata.MD) error
|
||||
|
||||
// Write sends the data for the given stream.
|
||||
// Write may not be called on all streams.
|
||||
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
|
||||
|
||||
// WriteStatus sends the status of a stream to the client. WriteStatus is
|
||||
// the final call made on a stream and always occurs.
|
||||
WriteStatus(s *Stream, st *status.Status) error
|
||||
|
||||
// Close tears down the transport. Once it is called, the transport
|
||||
// should not be accessed any more. All the pending streams and their
|
||||
// handlers will be terminated asynchronously.
|
||||
Close() error
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
RemoteAddr() net.Addr
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
|
||||
// IncrMsgSent increments the number of messages sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of messages received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// connectionErrorf creates a ConnectionError with the specified error description.
|
||||
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
|
||||
return ConnectionError{
|
||||
Desc: fmt.Sprintf(format, a...),
|
||||
temp: temp,
|
||||
err: e,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionError is an error that results in the termination of the
|
||||
// entire connection and the retry of all the active streams.
|
||||
type ConnectionError struct {
|
||||
Desc string
|
||||
temp bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (e ConnectionError) Error() string {
|
||||
return fmt.Sprintf("connection error: desc = %q", e.Desc)
|
||||
}
|
||||
|
||||
// Temporary indicates if this connection error is temporary or fatal.
|
||||
func (e ConnectionError) Temporary() bool {
|
||||
return e.temp
|
||||
}
|
||||
|
||||
// Origin returns the original error of this connection error.
|
||||
func (e ConnectionError) Origin() error {
|
||||
// Never return nil error here.
|
||||
// If the original error is nil, return itself.
|
||||
if e.err == nil {
|
||||
return e
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrConnClosing indicates that the transport is closing.
|
||||
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
|
||||
// errStreamDrain indicates that the stream is rejected because the
|
||||
// connection is draining. This could be caused by goaway or balancer
|
||||
// removing the address.
|
||||
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
|
||||
// errStreamDone is returned from write at the client side to indicate an
|
||||
// error to the application layer.
|
||||
errStreamDone = errors.New("the stream is done")
|
||||
// statusGoAway indicates that the server sent a GOAWAY that included this
|
||||
// stream's ID in unprocessed RPCs.
|
||||
statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
|
||||
)
|
||||
|
||||
// GoAwayReason contains the reason for the GoAway frame received.
|
||||
type GoAwayReason uint8
|
||||
|
||||
const (
|
||||
// GoAwayInvalid indicates that no GoAway frame is received.
|
||||
GoAwayInvalid GoAwayReason = 0
|
||||
// GoAwayNoReason is the default value when GoAway frame is received.
|
||||
GoAwayNoReason GoAwayReason = 1
|
||||
// GoAwayTooManyPings indicates that a GoAway frame with
|
||||
// ErrCodeEnhanceYourCalm was received and that the debug data said
|
||||
// "too_many_pings".
|
||||
GoAwayTooManyPings GoAwayReason = 2
|
||||
)
|
||||
|
||||
// channelzData is used to store channelz related data for http2Client and http2Server.
|
||||
// These fields cannot be embedded in the original structs (e.g. http2Client), since to perform
|
||||
// atomic operations on an int64 variable on a 32-bit machine, the user is responsible for enforcing memory alignment.
|
||||
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
|
||||
type channelzData struct {
|
||||
kpCount int64
|
||||
// The number of streams that have started, including already finished ones.
|
||||
streamsStarted int64
|
||||
// Client side: The number of streams that have ended successfully by receiving
|
||||
// EoS bit set frame from server.
|
||||
// Server side: The number of streams that have ended successfully by sending
|
||||
// frame with EoS bit set.
|
||||
streamsSucceeded int64
|
||||
streamsFailed int64
|
||||
// lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64 type
|
||||
// instead of time.Time since it's more costly to atomically update time.Time variable than int64
|
||||
// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
|
||||
lastStreamCreatedTime int64
|
||||
msgSent int64
|
||||
msgRecv int64
|
||||
lastMsgSentTime int64
|
||||
lastMsgRecvTime int64
|
||||
}
|
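recvBuffer above builds an unbounded FIFO out of a 1-element channel plus a mutex-guarded backlog: put never blocks, and every receive from get() is expected to be followed by a call to load() so the next backlog entry can move into the channel. The following standalone sketch shows that put/load/get contract under the same assumptions; unboundedBuffer and its method names are illustrative, not the vendored type.

package main

import (
	"fmt"
	"sync"
)

// unboundedBuffer follows the same contract as recvBuffer above: an
// unbounded FIFO built from a 1-element channel plus a mutex-guarded backlog.
type unboundedBuffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func newUnboundedBuffer() *unboundedBuffer {
	return &unboundedBuffer{c: make(chan string, 1)}
}

// put never blocks: it sends directly into the channel when the slot is free
// and the backlog is empty, and otherwise appends to the backlog.
func (b *unboundedBuffer) put(v string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // fast path: channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v) // slow path: queue behind the channel
}

// load moves the oldest backlog entry into the channel if the slot is free.
func (b *unboundedBuffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *unboundedBuffer) get() <-chan string { return b.c }

func main() {
	b := newUnboundedBuffer()
	for i := 0; i < 3; i++ {
		b.put(fmt.Sprintf("msg-%d", i)) // never blocks, regardless of consumer speed
	}
	for i := 0; i < 3; i++ {
		m := <-b.get()
		b.load() // as with recvBuffer, refill the channel after each receive
		fmt.Println(m)
	}
}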
2319
vendor/google.golang.org/grpc/internal/transport/transport_test.go
generated
vendored
Normal file
File diff suppressed because it is too large