Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 10:53:34 +00:00)
rebase: Bump github.com/hashicorp/vault from 1.4.2 to 1.9.9
Bumps [github.com/hashicorp/vault](https://github.com/hashicorp/vault) from 1.4.2 to 1.9.9.
- [Release notes](https://github.com/hashicorp/vault/releases)
- [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/vault/compare/v1.4.2...v1.9.9)
---
updated-dependencies:
- dependency-name: github.com/hashicorp/vault
  dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
(cherry picked from commit ba40da7e36)
Committed by: mergify[bot]
Parent: 9ec78a63f3
Commit: 41a61efee4
5  vendor/github.com/cenkalti/backoff/v3/README.md (generated, vendored)

@@ -9,7 +9,10 @@ The retries exponentially increase and stop increasing when a certain threshold

## Usage

See https://godoc.org/github.com/cenkalti/backoff#pkg-examples

Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end.

godoc.org does not support modules yet,
so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation.

## Contributing
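The README hunk above only points at the godoc examples, so here is a minimal, hedged sketch of the v3 API touched by this bump (`NewExponentialBackOff`, `WithContext`, and `Retry` come from the package itself; the operation, URL, and error handling are placeholder assumptions):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff/v3" // note the /v3 suffix in the import path
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Hypothetical operation: retried until it returns nil or the policy gives up.
	operation := func() error {
		resp, err := http.Get("https://example.com/healthz") // placeholder URL
		if err != nil {
			return err // transient failure, will be retried
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status: %d", resp.StatusCode)
		}
		return nil
	}

	// Exponential backoff bounded by the surrounding context.
	policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	if err := backoff.Retry(operation, policy); err != nil {
		fmt.Println("giving up:", err)
	}
}
```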
15  vendor/github.com/cenkalti/backoff/v3/context.go (generated, vendored)

@@ -7,7 +7,7 @@ import (
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface {
type BackOffContext interface { // nolint: golint
BackOff
Context() context.Context
}

@@ -20,7 +20,7 @@ type backOffContext struct {
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext {
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
if ctx == nil {
panic("nil context")
}

@@ -38,11 +38,14 @@ func WithContext(b BackOff, ctx context.Context) BackOffContext {
}
}
func ensureContext(b BackOff) BackOffContext {
func getContext(b BackOff) context.Context {
if cb, ok := b.(BackOffContext); ok {
return cb
return cb.Context()
}
return WithContext(b, context.Background())
if tb, ok := b.(*backOffTries); ok {
return getContext(tb.delegate)
}
return context.Background()
}
func (b *backOffContext) Context() context.Context {

@@ -56,7 +59,7 @@ func (b *backOffContext) NextBackOff() time.Duration {
default:
}
next := b.BackOff.NextBackOff()
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple
return Stop
}
return next
3  vendor/github.com/cenkalti/backoff/v3/exponential.go (generated, vendored)

@@ -103,13 +103,14 @@ func (t systemClock) Now() time.Time {
var SystemClock = systemClock{}
// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
b.startTime = b.Clock.Now()
}
// NextBackOff calculates the next backoff interval using the formula:
// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
40  vendor/github.com/cenkalti/backoff/v3/retry.go (generated, vendored)

@@ -21,16 +21,31 @@ type Notify func(error, time.Duration)
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
func Retry(o Operation, b BackOff) error {
return RetryNotify(o, b, nil)
}
// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
return RetryNotifyWithTimer(operation, b, notify, nil)
}
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
var err error
var next time.Duration
var t *time.Timer
if t == nil {
t = &defaultTimer{}
}
cb := ensureContext(b)
defer func() {
t.Stop()
}()
ctx := getContext(b)
b.Reset()
for {

@@ -42,7 +57,7 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
return permanent.Err
}
if next = cb.NextBackOff(); next == Stop {
if next = b.NextBackOff(); next == Stop {
return err
}

@@ -50,17 +65,12 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
notify(err, next)
}
if t == nil {
t = time.NewTimer(next)
defer t.Stop()
} else {
t.Reset(next)
}
t.Start(next)
select {
case <-cb.Context().Done():
return err
case <-t.C:
case <-ctx.Done():
return ctx.Err()
case <-t.C():
}
}
}

@@ -74,6 +84,10 @@ func (e *PermanentError) Error() string {
return e.Err.Error()
}
func (e *PermanentError) Unwrap() error {
return e.Err
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) *PermanentError {
return &PermanentError{
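To illustrate the `RetryNotify` and `Permanent` helpers visible in this hunk, a short, hedged sketch; the operation and error values are invented for illustration only:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v3"
)

var errBadCredentials = errors.New("bad credentials") // placeholder error

func main() {
	attempt := 0
	operation := func() error {
		attempt++
		if attempt < 3 {
			return errors.New("temporarily unavailable") // retried
		}
		// Wrapping with Permanent stops the retry loop immediately;
		// the wrapped error is what the retry call returns.
		return backoff.Permanent(errBadCredentials)
	}

	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify)
	fmt.Println("final error:", err, "is bad credentials:", errors.Is(err, errBadCredentials))
}
```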
26  vendor/github.com/cenkalti/backoff/v3/ticker.go (generated, vendored)

@@ -1,6 +1,7 @@
package backoff
import (
"context"
"sync"
"time"
)

@@ -12,7 +13,9 @@ import (
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOffContext
b BackOff
ctx context.Context
timer Timer
stop chan struct{}
stopOnce sync.Once
}

@@ -24,12 +27,20 @@ type Ticker struct {
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
return NewTickerWithTimer(b, &defaultTimer{})
}
// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: ensureContext(b),
stop: make(chan struct{}),
C: c,
c: c,
b: b,
ctx: getContext(b),
timer: timer,
stop: make(chan struct{}),
}
t.b.Reset()
go t.run()

@@ -59,7 +70,7 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.b.Context().Done():
case <-t.ctx.Done():
return
}
}

@@ -78,5 +89,6 @@ func (t *Ticker) send(tick time.Time) <-chan time.Time {
return nil
}
return time.After(next)
t.timer.Start(next)
return t.timer.C()
}
35  vendor/github.com/cenkalti/backoff/v3/timer.go (generated, vendored, new file)

@@ -0,0 +1,35 @@
package backoff
import "time"
type Timer interface {
Start(duration time.Duration)
Stop()
C() <-chan time.Time
}
// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
timer *time.Timer
}
// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
return t.timer.C
}
// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
if t.timer == nil {
t.timer = time.NewTimer(duration)
} else {
t.timer.Reset(duration)
}
}
// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
if t.timer != nil {
t.timer.Stop()
}
}
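Since the new `Timer` interface and `RetryNotifyWithTimer` both appear in this change, here is a hedged sketch of plugging in a custom timer, for example to make retries fire instantly in tests; `instantTimer` is an invented name, not part of the library:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v3"
)

// instantTimer satisfies backoff.Timer but fires immediately,
// which removes real sleeping from tests. Hypothetical helper.
type instantTimer struct {
	c chan time.Time
}

func (t *instantTimer) Start(d time.Duration) {
	if t.c == nil {
		t.c = make(chan time.Time, 1)
	}
	t.c <- time.Now() // fire right away instead of after d
}

func (t *instantTimer) C() <-chan time.Time { return t.c }

func (t *instantTimer) Stop() {}

func main() {
	calls := 0
	operation := func() error {
		calls++
		if calls < 5 {
			return errors.New("not yet")
		}
		return nil
	}

	// A nil Notify is allowed; passing a nil Timer would fall back to defaultTimer.
	err := backoff.RetryNotifyWithTimer(operation, backoff.NewExponentialBackOff(), nil, &instantTimer{})
	fmt.Println("calls:", calls, "err:", err)
}
```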
32  vendor/github.com/fatih/color/README.md (generated, vendored)

@@ -1,20 +1,11 @@

# Archived project. No maintenance.

This project is not maintained anymore and is archived. Feel free to fork and
make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)

Thanks to everyone for their valuable feedback and contributions.

# Color [](https://godoc.org/github.com/fatih/color)
# color [](https://github.com/fatih/color/actions) [](https://pkg.go.dev/github.com/fatih/color)

Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.




## Install

@@ -87,7 +78,7 @@ notice("Don't forget this...")

### Custom fprint functions (FprintFunc)

```go
blue := color.New(FgBlue).FprintfFunc()
blue := color.New(color.FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)

// Mix up with multiple attributes

@@ -136,14 +127,16 @@ fmt.Println("All text will now be bold magenta.")

There might be a case where you want to explicitly disable/enable color output. the
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`)
(for example if the output were piped directly to `less`).

`Color` has support to disable/enable colors both globally and for single color
definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You
can easily disable the color output with:
The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
variable is set (regardless of its value).

`Color` has support to disable/enable colors programatically both globally and
for single color definitions. For example suppose you have a CLI app and a
`--no-color` bool flag. You can easily disable the color output with:

```go

var flagNoColor = flag.Bool("no-color", false, "Disable color output")

if *flagNoColor {

@@ -165,6 +158,10 @@ c.EnableColor()
c.Println("This prints again cyan...")
```

## GitHub Actions

To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.

## Todo

* Save/Return previous values

@@ -179,4 +176,3 @@ c.Println("This prints again cyan...")

## License

The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
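To make the README changes above concrete, a small hedged sketch that combines the corrected `FprintfFunc` call with the `--no-color` flag pattern; the writer and messages are placeholders:

```go
package main

import (
	"flag"
	"os"

	"github.com/fatih/color"
)

func main() {
	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
	flag.Parse()

	if *flagNoColor {
		color.NoColor = true // disables colorized output globally
	}

	// Corrected form from the README diff: attributes are referenced
	// through the package, e.g. color.FgBlue rather than FgBlue.
	blue := color.New(color.FgBlue).FprintfFunc()
	blue(os.Stderr, "important notice: %s\n", "disk almost full")

	color.New(color.FgCyan, color.Bold).Println("cyan and bold")
}
```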
25  vendor/github.com/fatih/color/color.go (generated, vendored)

@@ -15,9 +15,11 @@ import (
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
NoColor = os.Getenv("TERM") == "dumb" ||
// or not. It's also set to true if the NO_COLOR environment variable is
// set (regardless of its value). This is a global option and affects all
// colors. For more control over each color block use the methods
// DisableColor() individually.
NoColor = noColorExists() || os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
// Output defines the standard output of the print functions. By default

@@ -33,6 +35,12 @@ var (
colorsCacheMu sync.Mutex // protects colorsCache
)
// noColorExists returns true if the environment variable NO_COLOR exists.
func noColorExists() bool {
_, exists := os.LookupEnv("NO_COLOR")
return exists
}
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute

@@ -108,7 +116,14 @@ const (
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c := &Color{
params: make([]Attribute, 0),
}
if noColorExists() {
c.noColor = boolPtr(true)
}
c.Add(value...)
return c
}

@@ -387,7 +402,7 @@ func (c *Color) EnableColor() {
}
func (c *Color) isNoColorSet() bool {
// check first if we have user setted action
// check first if we have user set action
if c.noColor != nil {
return *c.noColor
}
2  vendor/github.com/fatih/color/doc.go (generated, vendored)

@@ -118,6 +118,8 @@ the color output with:
color.NoColor = true // disables colorized output
}
You can also disable the color by setting the NO_COLOR environment variable to any value.
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
11  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go (generated, vendored)

@@ -275,11 +275,12 @@ func (p *parser) accept(term termType) (string, error) {
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
// pct-encoded = "%" HEXDIG HEXDIG
//
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
6  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel (generated, vendored)

@@ -30,6 +30,7 @@ go_library(
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",

@@ -37,6 +38,7 @@ go_library(
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//reflect/protoregistry",
"@org_golang_google_protobuf//types/known/durationpb",
"@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
],

@@ -56,8 +58,10 @@ go_test(
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
"mux_internal_test.go",
"mux_test.go",
"pattern_test.go",
"query_fuzz_test.go",
"query_test.go",
],
embed = [":runtime"],

@@ -70,7 +74,9 @@ go_test(
"@go_googleapis//google/rpc:errdetails_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
19  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go (generated, vendored)

@@ -41,6 +41,12 @@ var (
DefaultContextTimeout = 0 * time.Second
)
// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
var malformedHTTPHeaders = map[string]struct{}{
"connection": {},
}
type (
rpcMethodKey struct{}
httpPathPatternKey struct{}

@@ -172,11 +178,17 @@ type serverMetadataKey struct{}
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, serverMetadataKey{}, md)
}
// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
if ctx == nil {
return md, false
}
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
return
}

@@ -308,6 +320,13 @@ func isPermanentHTTPHeader(hdr string) bool {
return false
}
// isMalformedHTTPHeader checks whether header belongs to the list of
// "malformed headers" and would be rejected by the gRPC server.
func isMalformedHTTPHeader(header string) bool {
_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
return isMalformed
}
// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
2  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go (generated, vendored)

@@ -265,7 +265,7 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
}
/*
Support fot google.protobuf.wrappers on top of primitive types
Support for google.protobuf.wrappers on top of primitive types
*/
// StringValue well-known type support as wrapper around string type
9  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go (generated, vendored)

@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default http error codes mapped on the following error codes:
// NotFound -> grpc.NotFound
// StatusBadRequest -> grpc.InvalidArgument
// MethodNotAllowed -> grpc.Unimplemented
// Other -> grpc.Internal, method is not expecting to be called for anything else
//
// NotFound -> grpc.NotFound
// StatusBadRequest -> grpc.InvalidArgument
// MethodNotAllowed -> grpc.Unimplemented
// Other -> grpc.Internal, method is not expecting to be called for anything else
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
sterr := status.Error(codes.Internal, "Unexpected routing error")
switch httpStatus {
2  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go (generated, vendored)

@@ -53,7 +53,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
}
if isDynamicProtoMessage(fd.Message()) {
for _, p := range buildPathsBlindly(k, v) {
for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
newPath := p
if item.path != "" {
newPath = item.path + "." + newPath
12  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go (generated, vendored)

@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
return
}
if err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}

@@ -82,7 +82,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
if err != nil {
grpclog.Infof("Failed to marshal response chunk: %v", err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if _, err = w.Write(buf); err != nil {

@@ -200,7 +200,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
return nil
}
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
if !wroteHeader {

@@ -216,6 +216,10 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar
grpclog.Infof("Failed to notify error to client: %v", werr)
return
}
if _, derr := w.Write(delimiter); derr != nil {
grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
}
func errorChunk(st *status.Status) map[string]proto.Message {
11  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go (generated, vendored)

@@ -280,6 +280,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
return nil
}
if rv.Kind() == reflect.Slice {
if rv.Type().Elem().Kind() == reflect.Uint8 {
var sl []byte
if err := d.Decode(&sl); err != nil {
return err
}
if sl != nil {
rv.SetBytes(sl)
}
return nil
}
var sl []json.RawMessage
if err := d.Decode(&sl); err != nil {
return err
98  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go (generated, vendored)

@@ -6,10 +6,13 @@ import (
"fmt"
"net/http"
"net/textproto"
"regexp"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"

@@ -23,15 +26,15 @@ const (
// path string before doing any routing.
UnescapingModeLegacy UnescapingMode = iota
// EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
// reserved characters.
UnescapingModeAllExceptReserved
// EscapingTypeExceptSlash unescapes URL path parameters except path
// seperators, which will be left as "%2F".
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
// separators, which will be left as "%2F".
UnescapingModeAllExceptSlash
// URL path parameters will be fully decoded.
// UnescapingModeAllCharacters unescapes all URL path parameters.
UnescapingModeAllCharacters
// UnescapingModeDefault is the default escaping type.

@@ -40,6 +43,10 @@ const (
UnescapingModeDefault = UnescapingModeLegacy
)
var (
encodedPathSplitter = regexp.MustCompile("(/|%2F)")
)
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)

@@ -113,11 +120,30 @@ func DefaultHeaderMatcher(key string) (string, bool) {
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
for _, header := range fn.matchedMalformedHeaders() {
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
}
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
if fn == nil {
return nil
}
headers := make([]string, 0)
for header := range malformedHTTPHeaders {
out, accept := fn(header)
if accept && isMalformedHTTPHeader(out) {
headers = append(headers, out)
}
}
return headers
}
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be

@@ -179,6 +205,57 @@ func WithDisablePathLengthFallback() ServeMuxOption {
}
}
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
// When called the handler will forward the request to the upstream grpc service health check (defined in the
// gRPC Health Checking Protocol).
//
// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
// to setup the protocol in the grpc server.
//
// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
return func(s *ServeMux) {
// error can be ignored since pattern is definitely valid
_ = s.HandlePath(
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
) {
_, outboundMarshaler := MarshalerForRequest(s, r)
resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
Service: r.URL.Query().Get("service"),
})
if err != nil {
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
return
}
w.Header().Set("Content-Type", "application/json")
if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
var err error
switch resp.GetStatus() {
case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
err = status.Error(codes.Unavailable, resp.String())
case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
err = status.Error(codes.NotFound, resp.String())
}
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
return
}
_ = outboundMarshaler.NewEncoder(w).Encode(resp)
})
}
}
// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
//
// See WithHealthEndpointAt for the general implementation.
func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
return WithHealthEndpointAt(healthCheckClient, "/healthz")
}
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{

@@ -229,7 +306,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er
return nil
}
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -245,7 +322,16 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path = r.URL.RawPath
}
components := strings.Split(path[1:], "/")
var components []string
// since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
// in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
// path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
// behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
if s.unescapingMode == UnescapingModeAllCharacters {
components = encodedPathSplitter.Split(path[1:], -1)
} else {
components = strings.Split(path[1:], "/")
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
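The new `WithHealthzEndpoint` option and the header-matcher warning above are easiest to see wired together. This is a hedged sketch only; the dial target, custom header name, and the comment about generated handlers are assumptions, not part of the diff:

```go
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials())) // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	mux := runtime.NewServeMux(
		// GET /healthz is forwarded to the gRPC health service; ?service=foo
		// is passed through as the HealthCheckRequest service name.
		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
		// Custom matcher: forward the default set plus one extra header.
		// Accepting "connection" here would trigger the new malformed-header warning.
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if key == "X-Request-Id" { // hypothetical extra header
				return key, true
			}
			return runtime.DefaultHeaderMatcher(key)
		}),
	)

	// Generated Register<Service>HandlerClient calls for your services would go here.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```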
37  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go (generated, vendored)

@@ -1,7 +1,6 @@
package runtime
import (
"encoding/base64"
"errors"
"fmt"
"net/url"

@@ -13,17 +12,19 @@ import (
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {

@@ -36,11 +37,15 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// DefaultQueryParser is a QueryParameterParser which implements the default
// query parameters parsing behavior.
//
// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
type DefaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {

@@ -234,7 +239,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
case protoreflect.StringKind:
return protoreflect.ValueOfString(value), nil
case protoreflect.BytesKind:
v, err := base64.URLEncoding.DecodeString(value)
v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}

@@ -250,18 +255,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
var msg proto.Message
switch msgDescriptor.FullName() {
case "google.protobuf.Timestamp":
if value == "null" {
break
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
case "google.protobuf.Duration":
if value == "null" {
break
}
d, err := time.ParseDuration(value)
if err != nil {
return protoreflect.Value{}, err

@@ -312,7 +311,7 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
case "google.protobuf.StringValue":
msg = &wrapperspb.StringValue{Value: value}
case "google.protobuf.BytesValue":
v, err := base64.URLEncoding.DecodeString(value)
v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}

@@ -321,6 +320,20 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
fm := &field_mask.FieldMask{}
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
msg = fm
case "google.protobuf.Value":
var v structpb.Value
err := protojson.Unmarshal([]byte(value), &v)
if err != nil {
return protoreflect.Value{}, err
}
msg = &v
case "google.protobuf.Struct":
var v structpb.Struct
err := protojson.Unmarshal([]byte(value), &v)
if err != nil {
return protoreflect.Value{}, err
}
msg = &v
default:
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
}
6  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel (generated, vendored)

@@ -8,6 +8,7 @@ go_library(
"doc.go",
"pattern.go",
"readerfactory.go",
"string_array_flag.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",

@@ -16,7 +17,10 @@ go_library(
go_test(
name = "utilities_test",
size = "small",
srcs = ["trie_test.go"],
srcs = [
"string_array_flag_test.go",
"trie_test.go",
],
deps = [":utilities"],
)
33  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go (generated, vendored, new file)

@@ -0,0 +1,33 @@
package utilities
import (
"flag"
"strings"
)
// flagInterface is an cut down interface to `flag`
type flagInterface interface {
Var(value flag.Value, name string, usage string)
}
// StringArrayFlag defines a flag with the specified name and usage string.
// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
value := &StringArrayFlags{}
f.Var(value, name, usage)
return value
}
// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
type StringArrayFlags []string
// String returns a string representation of `StringArrayFlags`
func (i *StringArrayFlags) String() string {
return strings.Join(*i, ",")
}
// Set appends a value to `StringArrayFlags`
func (i *StringArrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
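A hedged usage sketch for the new `StringArrayFlags` helper above; the flag name and the way the values are consumed are illustrative assumptions:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// *flag.FlagSet satisfies the (unexported) flagInterface via its Var method.
	headers := utilities.StringArrayFlag(flag.CommandLine, "allowed-header", "header to forward; may be repeated")
	flag.Parse()

	// e.g. go run . --allowed-header X-A --allowed-header X-B
	for _, h := range *headers {
		fmt.Println("forwarding header:", h)
	}
	fmt.Println("joined:", headers.String())
}
```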
12  vendor/github.com/hashicorp/go-hclog/README.md (generated, vendored)

@@ -17,11 +17,8 @@ JSON output mode for production.

## Stability Note

While this library is fully open source and HashiCorp will be maintaining it
(since we are and will be making extensive use of it), the API and output
format is subject to minor changes as we fully bake and vet it in our projects.
This notice will be removed once it's fully integrated into our major projects
and no further changes are anticipated.
This library has reached 1.0 stability. It's API can be considered solidified
and promised through future versions.

## Installation and Docs

@@ -102,7 +99,7 @@ into all the callers.

### Using `hclog.Fmt()`

```go
var int totalBandwidth = 200
totalBandwidth := 200
appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
```

@@ -146,3 +143,6 @@ log.Printf("[DEBUG] %d", 42)

Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.

If the log lines start with a timestamp you can use the
`InferLevelsWithTimestamp` option to try and ignore them.
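For context around the corrected `hclog.Fmt()` snippet above, a hedged end-to-end sketch; the logger name and values are placeholders:

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{
		Name:  "my-app", // placeholder subsystem name
		Level: hclog.Info,
	})

	// The corrected short-variable form from the README diff.
	totalBandwidth := 200
	appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
}
```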
2  vendor/github.com/hashicorp/go-hclog/global.go (generated, vendored)

@@ -2,6 +2,7 @@ package hclog
import (
"sync"
"time"
)
var (

@@ -14,6 +15,7 @@ var (
DefaultOptions = &LoggerOptions{
Level: DefaultLevel,
Output: DefaultOutput,
TimeFn: time.Now,
}
)
7  vendor/github.com/hashicorp/go-hclog/interceptlogger.go (generated, vendored)

@@ -180,9 +180,10 @@ func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) i
func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
return &stdlogAdapter{
log: i,
inferLevels: opts.InferLevels,
forceLevel: opts.ForceLevel,
log: i,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
39  vendor/github.com/hashicorp/go-hclog/intlogger.go (generated, vendored)

@@ -60,6 +60,7 @@ type intLogger struct {
callerOffset int
name string
timeFormat string
timeFn TimeFunction
disableTime bool
// This is an interface so that it's shared by any derived loggers, since

@@ -116,6 +117,7 @@ func newLogger(opts *LoggerOptions) *intLogger {
json: opts.JSONFormat,
name: opts.Name,
timeFormat: TimeFormat,
timeFn: time.Now,
disableTime: opts.DisableTime,
mutex: mutex,
writer: newWriter(output, opts.Color),

@@ -130,6 +132,9 @@ func newLogger(opts *LoggerOptions) *intLogger {
if l.json {
l.timeFormat = TimeFormatJSON
}
if opts.TimeFn != nil {
l.timeFn = opts.TimeFn
}
if opts.TimeFormat != "" {
l.timeFormat = opts.TimeFormat
}

@@ -152,7 +157,7 @@ func (l *intLogger) log(name string, level Level, msg string, args ...interface{
return
}
t := time.Now()
t := l.timeFn()
l.mutex.Lock()
defer l.mutex.Unlock()

@@ -199,6 +204,24 @@ func trimCallerPath(path string) string {
return path[idx+1:]
}
// isNormal indicates if the rune is one allowed to exist as an unquoted
// string value. This is a subset of ASCII, `-` through `~`.
func isNormal(r rune) bool {
return 0x2D <= r && r <= 0x7E // - through ~
}
// needsQuoting returns false if all the runes in string are normal, according
// to isNormal
func needsQuoting(str string) bool {
for _, r := range str {
if !isNormal(r) {
return true
}
}
return false
}
// Non-JSON logging format function
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {

@@ -263,6 +286,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
val = st
if st == "" {
val = `""`
raw = true
}
case int:
val = strconv.FormatInt(int64(st), 10)

@@ -323,13 +347,11 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
l.writer.WriteString("=\n")
writeIndent(l.writer, val, " | ")
l.writer.WriteString(" ")
} else if !raw && strings.ContainsAny(val, " \t") {
} else if !raw && needsQuoting(val) {
l.writer.WriteByte(' ')
l.writer.WriteString(key)
l.writer.WriteByte('=')
l.writer.WriteByte('"')
l.writer.WriteString(val)
l.writer.WriteByte('"')
l.writer.WriteString(strconv.Quote(val))
} else {
l.writer.WriteByte(' ')
l.writer.WriteString(key)

@@ -687,9 +709,10 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
newLog.callerOffset = l.callerOffset + 4
}
return &stdlogAdapter{
log: &newLog,
inferLevels: opts.InferLevels,
forceLevel: opts.ForceLevel,
log: &newLog,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
15  vendor/github.com/hashicorp/go-hclog/logger.go (generated, vendored)

@@ -5,6 +5,7 @@ import (
"log"
"os"
"strings"
"time"
)
var (

@@ -212,6 +213,15 @@ type StandardLoggerOptions struct {
// [DEBUG] and strip it off before reapplying it.
InferLevels bool
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them while ignoring possible
// timestamp values in the beginning of the string.
// This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO],
// [DEBUG] and strip it off before reapplying it.
// The timestamp detection may result in false positives and incomplete
// string outputs.
InferLevelsWithTimestamp bool
// ForceLevel is used to force all output from the standard logger to be at
// the specified level. Similar to InferLevels, this will strip any level
// prefix contained in the logged string before applying the forced level.

@@ -219,6 +229,8 @@ type StandardLoggerOptions struct {
ForceLevel Level
}
type TimeFunction = func() time.Time
// LoggerOptions can be used to configure a new logger.
type LoggerOptions struct {
// Name of the subsystem to prefix logs with

@@ -248,6 +260,9 @@ type LoggerOptions struct {
// The time format to use instead of the default
TimeFormat string
// A function which is called to get the time object that is formatted using `TimeFormat`
TimeFn TimeFunction
// Control whether or not to display the time at all. This is required
// because setting TimeFormat to empty assumes the default format.
DisableTime bool
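The new `TimeFn` option sits next to `TimeFormat`, so a brief hedged sketch of pinning log timestamps (handy for reproducible test output); the fixed instant is an arbitrary assumption:

```go
package main

import (
	"time"

	"github.com/hashicorp/go-hclog"
)

func main() {
	fixed := time.Date(2022, 1, 2, 3, 4, 5, 0, time.UTC) // arbitrary fixed instant

	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "test",
		Level:      hclog.Debug,
		TimeFormat: time.RFC3339,
		// TimeFn supplies the time that TimeFormat formats; it defaults to time.Now.
		TimeFn: func() time.Time { return fixed },
	})

	logger.Debug("deterministic timestamp for golden files")
}
```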
21  vendor/github.com/hashicorp/go-hclog/stdlog.go (generated, vendored)

@@ -3,16 +3,22 @@ package hclog
import (
"bytes"
"log"
"regexp"
"strings"
)
// Regex to ignore characters commonly found in timestamp formats from the
// beginning of inputs.
var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
// Provides a io.Writer to shim the data out of *log.Logger
// and back into our Logger. This is basically the only way to
// build upon *log.Logger.
type stdlogAdapter struct {
log Logger
inferLevels bool
forceLevel Level
log Logger
inferLevels bool
inferLevelsWithTimestamp bool
forceLevel Level
}
// Take the data, infer the levels if configured, and send it through

@@ -28,6 +34,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) {
// Log at the forced level
s.dispatch(str, s.forceLevel)
} else if s.inferLevels {
if s.inferLevelsWithTimestamp {
str = s.trimTimestamp(str)
}
level, str := s.pickLevel(str)
s.dispatch(str, level)
} else {

@@ -74,6 +84,11 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
}
}
func (s *stdlogAdapter) trimTimestamp(str string) string {
idx := logTimestampRegexp.FindStringIndex(str)
return str[idx[1]:]
}
type logWriter struct {
l *log.Logger
}
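To show where the new timestamp-trimming regexp comes into play, a hedged sketch that routes the standard library logger through hclog; the log line content is invented:

```go
package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{Name: "bridge", Level: hclog.Debug})

	// Standard-library logs are shimmed back into hclog; with
	// InferLevelsWithTimestamp the leading date/time is stripped before
	// the [DEBUG]/[INFO]/... prefix is inspected.
	std := log.New(appLogger.StandardWriter(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true,
	}), "", log.LstdFlags)

	std.Println("[DEBUG] cache warmed in 42ms")
}
```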
42  vendor/github.com/hashicorp/hcl/decoder.go (generated, vendored)

@@ -505,7 +505,7 @@ func expandObject(node ast.Node, result reflect.Value) ast.Node {
// we need to un-flatten the ast enough to decode
newNode := &ast.ObjectItem{
Keys: []*ast.ObjectKey{
&ast.ObjectKey{
{
Token: keyToken,
},
},

@@ -628,6 +628,20 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
decodedFields := make([]string, 0, len(fields))
decodedFieldsVal := make([]reflect.Value, 0)
unusedKeysVal := make([]reflect.Value, 0)
// fill unusedNodeKeys with keys from the AST
// a slice because we have to do equals case fold to match Filter
unusedNodeKeys := make(map[string][]token.Pos, 0)
for _, item := range list.Items {
for _, k := range item.Keys{
if k.Token.JSON || k.Token.Type == token.IDENT {
fn := k.Token.Value().(string)
sl := unusedNodeKeys[fn]
unusedNodeKeys[fn] = append(sl, k.Token.Pos)
}
}
}
for _, f := range fields {
field, fieldValue := f.field, f.val
if !fieldValue.IsValid() {

@@ -661,7 +675,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
fieldValue.SetString(item.Keys[0].Token.Value().(string))
continue
case "unusedKeys":
case "unusedKeyPositions":
unusedKeysVal = append(unusedKeysVal, fieldValue)
continue
}

@@ -682,8 +696,9 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
continue
}
// Track the used key
// Track the used keys
usedKeys[fieldName] = struct{}{}
unusedNodeKeys = removeCaseFold(unusedNodeKeys, fieldName)
// Create the field name and decode. We range over the elements
// because we actually want the value.

@@ -716,6 +731,13 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
}
}
if len(unusedNodeKeys) > 0 {
// like decodedFields, populated the unusedKeys field(s)
for _, v := range unusedKeysVal {
v.Set(reflect.ValueOf(unusedNodeKeys))
}
}
return nil
}

@@ -727,3 +749,17 @@ func findNodeType() reflect.Type {
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
return value.Type()
}
func removeCaseFold(xs map[string][]token.Pos, y string) map[string][]token.Pos {
var toDel []string
for i := range xs {
if strings.EqualFold(i, y) {
toDel = append(toDel, i)
}
}
for _, i := range toDel {
delete(xs, i)
}
return xs
}
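A hedged sketch of what the new `unusedKeyPositions` plumbing looks like from the caller's side; the struct, field names, and HCL input are assumptions about how the tag is meant to be used, not something taken from the diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/token"
)

type ServiceConfig struct {
	Name string `hcl:"name"`
	Port int    `hcl:"port"`

	// Populated with every key in the input that no struct field consumed,
	// keyed by name, with the positions where each key appeared.
	Unused map[string][]token.Pos `hcl:",unusedKeyPositions"`
}

func main() {
	const input = `
name = "gateway"
port = 8080
tls  = true   # no matching field, so it should show up in Unused
`
	var cfg ServiceConfig
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}

	for key, positions := range cfg.Unused {
		fmt.Printf("unused key %q at %v\n", key, positions)
	}
}
```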
15  vendor/github.com/hashicorp/hcl/hcl/ast/ast.go (generated, vendored)

@@ -25,6 +25,8 @@ func (ObjectType) node() {}
func (LiteralType) node() {}
func (ListType) node() {}
var unknownPos token.Pos
// File represents a single HCL file
type File struct {
Node Node // usually a *ObjectList

@@ -108,7 +110,12 @@ func (o *ObjectList) Elem() *ObjectList {
}
func (o *ObjectList) Pos() token.Pos {
// always returns the uninitiliazed position
// If an Object has no members, it won't have a first item
// to use as position
if len(o.Items) == 0 {
return unknownPos
}
// Return the uninitialized position
return o.Items[0].Pos()
}

@@ -133,10 +140,10 @@ type ObjectItem struct {
}
func (o *ObjectItem) Pos() token.Pos {
// I'm not entirely sure what causes this, but removing this causes
// a test failure. We should investigate at some point.
// If a parsed object has no keys, there is no position
// for its first element.
if len(o.Keys) == 0 {
return token.Pos{}
return unknownPos
}
return o.Keys[0].Pos()
177  vendor/github.com/hashicorp/vault/command/agent/auth/auth.go (generated, vendored)

@@ -2,15 +2,24 @@ package auth
import (
"context"
"encoding/json"
"errors"
"math/rand"
"net/http"
"time"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
)
const (
initialBackoff = 1 * time.Second
defaultMaxBackoff = 5 * time.Minute
)
// AuthMethod is the interface that auto-auth methods implement for the agent
// to use.
type AuthMethod interface {
// Authenticate returns a mount path, header, request body, and error.
// The header may be nil if no special header is needed.

@@ -20,6 +29,13 @@ type AuthMethod interface {
Shutdown()
}
// AuthMethodWithClient is an extended interface that can return an API client
// for use during the authentication call.
type AuthMethodWithClient interface {
AuthMethod
AuthClient(client *api.Client) (*api.Client, error)
}
type AuthConfig struct {
Logger hclog.Logger
MountPath string

@@ -30,13 +46,14 @@ type AuthConfig struct {
// AuthHandler is responsible for keeping a token alive and renewed and passing
// new tokens to the sink server
type AuthHandler struct {
DoneCh chan struct{}
OutputCh chan string
TemplateTokenCh chan string
token string
logger hclog.Logger
client *api.Client
random *rand.Rand
wrapTTL time.Duration
maxBackoff time.Duration
enableReauthOnNewCredentials bool
enableTemplateTokenCh bool
}

@@ -45,21 +62,24 @@ type AuthHandlerConfig struct {
Logger hclog.Logger
Client *api.Client
WrapTTL time.Duration
MaxBackoff time.Duration
Token string
EnableReauthOnNewCredentials bool
EnableTemplateTokenCh bool
}
func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
ah := &AuthHandler{
DoneCh: make(chan struct{}),
// This is buffered so that if we try to output after the sink server
// has been shut down, during agent shutdown, we won't block
OutputCh: make(chan string, 1),
TemplateTokenCh: make(chan string, 1),
token: conf.Token,
logger: conf.Logger,
client: conf.Client,
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
wrapTTL: conf.WrapTTL,
maxBackoff: conf.MaxBackoff,
enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials,
enableTemplateTokenCh: conf.EnableTemplateTokenCh,
}

@@ -67,23 +87,28 @@ func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
return ah
}
func backoffOrQuit(ctx context.Context, backoff time.Duration) {
func backoffOrQuit(ctx context.Context, backoff *agentBackoff) {
select {
case <-time.After(backoff):
case <-time.After(backoff.current):
case <-ctx.Done():
}
// Increase exponential backoff for the next time if we don't
// successfully auth/renew/etc.
backoff.next()
}
func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
if am == nil {
panic("nil auth method")
return errors.New("auth handler: nil auth method")
}
backoff := newAgentBackoff(ah.maxBackoff)
ah.logger.Info("starting auth handler")
defer func() {
am.Shutdown()
close(ah.OutputCh)
close(ah.DoneCh)
close(ah.TemplateTokenCh)
ah.logger.Info("auth handler stopped")
}()

@@ -109,31 +134,70 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
}
var watcher *api.LifetimeWatcher
first := true
for {
select {
case <-ctx.Done():
return
return nil
default:
}
// Create a fresh backoff value
backoff := 2*time.Second + time.Duration(ah.random.Int63()%int64(time.Second*2)-int64(time.Second))
var clientToUse *api.Client
var err error
var path string
var data map[string]interface{}
var header http.Header
ah.logger.Info("authenticating")
path, header, data, err := am.Authenticate(ctx, ah.client)
if err != nil {
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoff.Seconds())
backoffOrQuit(ctx, backoff)
continue
switch am.(type) {
case AuthMethodWithClient:
clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client)
if err != nil {
ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff)
backoffOrQuit(ctx, backoff)
continue
}
default:
clientToUse = ah.client
}
clientToUse := ah.client
if ah.wrapTTL > 0 {
wrapClient, err := ah.client.Clone()
var secret *api.Secret = new(api.Secret)
if first && ah.token != "" {
ah.logger.Debug("using preloaded token")
first = false
ah.logger.Debug("lookup-self with preloaded token")
clientToUse.SetToken(ah.token)
secret, err = clientToUse.Logical().Read("auth/token/lookup-self")
if err != nil {
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoff.Seconds())
ah.logger.Error("could not look up token", "err", err, "backoff", backoff)
backoffOrQuit(ctx, backoff)
continue
}
duration, _ := secret.Data["ttl"].(json.Number).Int64()
secret.Auth = &api.SecretAuth{
ClientToken: secret.Data["id"].(string),
LeaseDuration: int(duration),
Renewable: secret.Data["renewable"].(bool),
}
} else {
ah.logger.Info("authenticating")
path, header, data, err = am.Authenticate(ctx, ah.client)
if err != nil {
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoff)
backoffOrQuit(ctx, backoff)
continue
}
}
if ah.wrapTTL > 0 {
wrapClient, err := clientToUse.Clone()
if err != nil {
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoff)
backoffOrQuit(ctx, backoff)
continue
}
||||
@ -148,29 +212,33 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
|
||||
}
|
||||
}
|
||||
|
||||
secret, err := clientToUse.Logical().Write(path, data)
|
||||
// Check errors/sanity
|
||||
if err != nil {
|
||||
ah.logger.Error("error authenticating", "error", err, "backoff", backoff.Seconds())
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
// This should only happen if there's no preloaded token (regular auto-auth login)
|
||||
// or if a preloaded token has expired and is now switching to auto-auth.
|
||||
if secret.Auth == nil {
|
||||
secret, err = clientToUse.Logical().Write(path, data)
|
||||
// Check errors/sanity
|
||||
if err != nil {
|
||||
ah.logger.Error("error authenticating", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case ah.wrapTTL > 0:
|
||||
if secret.WrapInfo == nil {
|
||||
ah.logger.Error("authentication returned nil wrap info", "backoff", backoff.Seconds())
|
||||
ah.logger.Error("authentication returned nil wrap info", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
if secret.WrapInfo.Token == "" {
|
||||
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoff.Seconds())
|
||||
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo)
|
||||
if err != nil {
|
||||
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoff.Seconds())
|
||||
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
@ -181,6 +249,7 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoff.reset()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@ -194,12 +263,12 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
|
||||
|
||||
default:
|
||||
if secret == nil || secret.Auth == nil {
|
||||
ah.logger.Error("authentication returned nil auth info", "backoff", backoff.Seconds())
|
||||
ah.logger.Error("authentication returned nil auth info", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
if secret.Auth.ClientToken == "" {
|
||||
ah.logger.Error("authentication returned empty client token", "backoff", backoff.Seconds())
|
||||
ah.logger.Error("authentication returned empty client token", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
@ -210,17 +279,18 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoff.reset()
|
||||
}
|
||||
|
||||
if watcher != nil {
|
||||
watcher.Stop()
|
||||
}
|
||||
|
||||
watcher, err = ah.client.NewLifetimeWatcher(&api.LifetimeWatcherInput{
|
||||
watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{
|
||||
Secret: secret,
|
||||
})
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating lifetime watcher, backing off and retrying", "error", err, "backoff", backoff.Seconds())
|
||||
ah.logger.Error("error creating lifetime watcher, backing off and retrying", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
@ -254,3 +324,42 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// agentBackoff tracks exponential backoff state.
|
||||
type agentBackoff struct {
|
||||
max time.Duration
|
||||
current time.Duration
|
||||
}
|
||||
|
||||
func newAgentBackoff(max time.Duration) *agentBackoff {
|
||||
if max <= 0 {
|
||||
max = defaultMaxBackoff
|
||||
}
|
||||
|
||||
return &agentBackoff{
|
||||
max: max,
|
||||
current: initialBackoff,
|
||||
}
|
||||
}
|
||||
|
||||
// next determines the next backoff duration that is roughly twice
|
||||
// the current value, capped to a max value, with a measure of randomness.
|
||||
func (b *agentBackoff) next() {
|
||||
maxBackoff := 2 * b.current
|
||||
|
||||
if maxBackoff > b.max {
|
||||
maxBackoff = b.max
|
||||
}
|
||||
|
||||
// Trim a random amount (0-25%) off the doubled duration
|
||||
trim := rand.Int63n(int64(maxBackoff) / 4)
|
||||
b.current = maxBackoff - time.Duration(trim)
|
||||
}
|
||||
|
||||
func (b *agentBackoff) reset() {
|
||||
b.current = initialBackoff
|
||||
}
|
||||
|
||||
func (b agentBackoff) String() string {
|
||||
return b.current.Truncate(10 * time.Millisecond).String()
|
||||
}
|
||||
|
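For illustration only (not part of this commit): a standalone sketch that reproduces the doubling-with-jitter schedule implemented by agentBackoff above, using the same initialBackoff (1s) and defaultMaxBackoff (5m) values declared in this file. It only prints the delays an agent would sleep through after repeated failures.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const (
		initial = 1 * time.Second // mirrors initialBackoff
		max     = 5 * time.Minute // mirrors defaultMaxBackoff
	)
	current := initial
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d: wait %s\n", attempt, current.Truncate(10*time.Millisecond))
		// Double the delay, cap it at max, then trim a random 0-25% as jitter,
		// following the same steps as agentBackoff.next().
		next := 2 * current
		if next > max {
			next = max
		}
		next -= time.Duration(rand.Int63n(int64(next) / 4))
		current = next
	}
}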
3
vendor/github.com/hashicorp/vault/command/agent/auth/kubernetes/kubernetes.go
generated
vendored
3
vendor/github.com/hashicorp/vault/command/agent/auth/kubernetes/kubernetes.go
generated
vendored
@ -10,7 +10,6 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/command/agent/auth"
|
||||
@ -78,7 +77,7 @@ func (k *kubernetesMethod) Authenticate(ctx context.Context, client *api.Client)
|
||||
|
||||
jwtString, err := k.readJWT()
|
||||
if err != nil {
|
||||
return "", nil, nil, errwrap.Wrapf("error reading JWT with Kubernetes Auth: {{err}}", err)
|
||||
return "", nil, nil, fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/login", k.mountPath), nil, map[string]interface{}{
|
||||
|
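For illustration only (not part of this commit): the kubernetes auth change above swaps errwrap.Wrapf for fmt.Errorf with %w. A minimal standalone sketch of why that matters: the wrapped cause stays reachable through the standard errors package (the token path below is just a placeholder).

package main

import (
	"errors"
	"fmt"
	"os"
)

func readJWT(path string) (string, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		// Same wrapping style as the updated Authenticate method.
		return "", fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err)
	}
	return string(b), nil
}

func main() {
	_, err := readJWT("/nonexistent/serviceaccount/token")
	fmt.Println(err)
	// errors.Is can still see the original cause through the %w chain.
	fmt.Println("not exist:", errors.Is(err, os.ErrNotExist))
}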
13
vendor/github.com/pierrec/lz4/lz4.go
generated
vendored
13
vendor/github.com/pierrec/lz4/lz4.go
generated
vendored
@ -10,9 +10,10 @@
|
||||
//
|
||||
package lz4
|
||||
|
||||
import "math/bits"
|
||||
|
||||
import "sync"
|
||||
import (
|
||||
"math/bits"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Extension is the LZ4 frame file name extension
|
||||
@ -20,8 +21,9 @@ const (
|
||||
// Version is the LZ4 frame format version
|
||||
Version = 1
|
||||
|
||||
frameMagic uint32 = 0x184D2204
|
||||
frameSkipMagic uint32 = 0x184D2A50
|
||||
frameMagic uint32 = 0x184D2204
|
||||
frameSkipMagic uint32 = 0x184D2A50
|
||||
frameMagicLegacy uint32 = 0x184C2102
|
||||
|
||||
// The following constants are used to setup the compression algorithm.
|
||||
minMatch = 4 // the minimum size of the match sequence size (4 bytes)
|
||||
@ -108,6 +110,7 @@ type Header struct {
|
||||
done bool // Header processed flag (Read or Write and checked).
|
||||
}
|
||||
|
||||
// Reset resets the internal status.
|
||||
func (h *Header) Reset() {
|
||||
h.done = false
|
||||
}
|
||||
|
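For illustration only (not part of this commit): this hunk adds frameMagicLegacy alongside the existing frame magics. A standalone sketch of how those constants (copied here as literals, since they are unexported) distinguish frame flavours from the first four little-endian bytes of a stream.

package main

import (
	"encoding/binary"
	"fmt"
)

func frameKind(header []byte) string {
	if len(header) < 4 {
		return "too short"
	}
	switch magic := binary.LittleEndian.Uint32(header); {
	case magic == 0x184D2204: // frameMagic
		return "standard LZ4 frame"
	case magic == 0x184C2102: // frameMagicLegacy
		return "legacy LZ4 frame"
	case magic>>4 == 0x184D2A5: // frameSkipMagic covers 0x184D2A50..0x184D2A5F
		return "skippable frame"
	default:
		return "not an LZ4 frame"
	}
}

func main() {
	fmt.Println(frameKind([]byte{0x04, 0x22, 0x4D, 0x18})) // standard
	fmt.Println(frameKind([]byte{0x02, 0x21, 0x4C, 0x18})) // legacy
}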
207
vendor/github.com/pierrec/lz4/reader_legacy.go
generated
vendored
Normal file
207
vendor/github.com/pierrec/lz4/reader_legacy.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ReaderLegacy implements the LZ4Demo frame decoder.
|
||||
// The Header is set after the first call to Read().
|
||||
type ReaderLegacy struct {
|
||||
Header
|
||||
// Handler called when a block has been successfully read.
|
||||
// It provides the number of bytes read.
|
||||
OnBlockDone func(size int)
|
||||
|
||||
lastBlock bool
|
||||
buf [8]byte // Scrap buffer.
|
||||
pos int64 // Current position in src.
|
||||
src io.Reader // Source.
|
||||
zdata []byte // Compressed data.
|
||||
data []byte // Uncompressed data.
|
||||
idx int // Index of unread bytes into data.
|
||||
skip int64 // Bytes to skip before next read.
|
||||
dpos int64 // Position in dest
|
||||
}
|
||||
|
||||
// NewReaderLegacy returns a new LZ4Demo frame decoder.
|
||||
// No access to the underlying io.Reader is performed.
|
||||
func NewReaderLegacy(src io.Reader) *ReaderLegacy {
|
||||
r := &ReaderLegacy{src: src}
|
||||
return r
|
||||
}
|
||||
|
||||
// readLegacyHeader checks the legacy frame magic number and parses the frame descriptor.
|
||||
// Skippable frames are supported even as a first frame although the LZ4
|
||||
// specification recommends that skippable frames not be used as first frames.
|
||||
func (z *ReaderLegacy) readLegacyHeader() error {
|
||||
z.lastBlock = false
|
||||
magic, err := z.readUint32()
|
||||
if err != nil {
|
||||
z.pos += 4
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return io.EOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
if magic != frameMagicLegacy {
|
||||
return ErrInvalid
|
||||
}
|
||||
z.pos += 4
|
||||
|
||||
// Legacy frames have a fixed 8 MB block size
|
||||
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
|
||||
bSize := blockSize4M * 2
|
||||
|
||||
// Allocate the compressed/uncompressed buffers.
|
||||
// The compressed buffer cannot exceed the uncompressed one.
|
||||
if n := 2 * bSize; cap(z.zdata) < n {
|
||||
z.zdata = make([]byte, n, n)
|
||||
}
|
||||
if debugFlag {
|
||||
debug("header block max size size=%d", bSize)
|
||||
}
|
||||
z.zdata = z.zdata[:bSize]
|
||||
z.data = z.zdata[:cap(z.zdata)][bSize:]
|
||||
z.idx = len(z.data)
|
||||
|
||||
z.Header.done = true
|
||||
if debugFlag {
|
||||
debug("header read: %v", z.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read decompresses data from the underlying source into the supplied buffer.
|
||||
//
|
||||
// Since there can be multiple streams concatenated, Header values may
|
||||
// change between calls to Read(). If that is the case, no data is actually read from
|
||||
// the underlying io.Reader, to allow for potential input buffer resizing.
|
||||
func (z *ReaderLegacy) Read(buf []byte) (int, error) {
|
||||
if debugFlag {
|
||||
debug("Read buf len=%d", len(buf))
|
||||
}
|
||||
if !z.Header.done {
|
||||
if err := z.readLegacyHeader(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if debugFlag {
|
||||
debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
|
||||
len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
|
||||
}
|
||||
}
|
||||
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if z.idx == len(z.data) {
|
||||
// No data ready for reading, process the next block.
|
||||
if debugFlag {
|
||||
debug(" reading block from writer %d %d", z.idx, blockSize4M*2)
|
||||
}
|
||||
|
||||
// Reset uncompressed buffer
|
||||
z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
|
||||
|
||||
bLen, err := z.readUint32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if debugFlag {
|
||||
debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
|
||||
}
|
||||
z.pos += 4
|
||||
|
||||
// Legacy blocks are always compressed, even when detrimental
|
||||
if debugFlag {
|
||||
debug(" compressed block size %d", bLen)
|
||||
}
|
||||
|
||||
if int(bLen) > cap(z.data) {
|
||||
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
|
||||
}
|
||||
zdata := z.zdata[:bLen]
|
||||
if _, err := io.ReadFull(z.src, zdata); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
z.pos += int64(bLen)
|
||||
|
||||
n, err := UncompressBlock(zdata, z.data)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
z.data = z.data[:n]
|
||||
if z.OnBlockDone != nil {
|
||||
z.OnBlockDone(n)
|
||||
}
|
||||
|
||||
z.idx = 0
|
||||
|
||||
// Legacy blocks are fixed to 8 MB; if we read a decompressed block smaller than this,
|
||||
// it means we've reached the end...
|
||||
if n < blockSize4M*2 {
|
||||
z.lastBlock = true
|
||||
}
|
||||
}
|
||||
|
||||
if z.skip > int64(len(z.data[z.idx:])) {
|
||||
z.skip -= int64(len(z.data[z.idx:]))
|
||||
z.dpos += int64(len(z.data[z.idx:]))
|
||||
z.idx = len(z.data)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
z.idx += int(z.skip)
|
||||
z.dpos += z.skip
|
||||
z.skip = 0
|
||||
|
||||
n := copy(buf, z.data[z.idx:])
|
||||
z.idx += n
|
||||
z.dpos += int64(n)
|
||||
if debugFlag {
|
||||
debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
|
||||
}
|
||||
if z.lastBlock && len(z.data) == z.idx {
|
||||
return n, io.EOF
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker, but supports seeking forward from the current
|
||||
// position only. Any other seek will return an error. Allows skipping output
|
||||
// bytes which aren't needed, which in some scenarios is faster than reading
|
||||
// and discarding them.
|
||||
// Note this may cause future calls to Read() to read 0 bytes if all of the
|
||||
// data they would have returned is skipped.
|
||||
func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
|
||||
if offset < 0 || whence != io.SeekCurrent {
|
||||
return z.dpos + z.skip, ErrUnsupportedSeek
|
||||
}
|
||||
z.skip += offset
|
||||
return z.dpos + z.skip, nil
|
||||
}
|
||||
|
||||
// Reset discards the Reader's state and makes it equivalent to the
|
||||
// result of its original state from NewReaderLegacy, but reading from r instead.
|
||||
// This permits reusing a Reader rather than allocating a new one.
|
||||
func (z *ReaderLegacy) Reset(r io.Reader) {
|
||||
z.Header = Header{}
|
||||
z.pos = 0
|
||||
z.src = r
|
||||
z.zdata = z.zdata[:0]
|
||||
z.data = z.data[:0]
|
||||
z.idx = 0
|
||||
}
|
||||
|
||||
// readUint32 reads a uint32 into the supplied buffer.
|
||||
// The idea is to make use of the already allocated buffers avoiding additional allocations.
|
||||
func (z *ReaderLegacy) readUint32() (uint32, error) {
|
||||
buf := z.buf[:4]
|
||||
_, err := io.ReadFull(z.src, buf)
|
||||
x := binary.LittleEndian.Uint32(buf)
|
||||
return x, err
|
||||
}
|
28
vendor/github.com/pierrec/lz4/writer.go
generated
vendored
28
vendor/github.com/pierrec/lz4/writer.go
generated
vendored
@ -3,9 +3,10 @@ package lz4
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/pierrec/lz4/internal/xxh32"
|
||||
"io"
|
||||
"runtime"
|
||||
|
||||
"github.com/pierrec/lz4/internal/xxh32"
|
||||
)
|
||||
|
||||
// zResult contains the results of compressing a block.
|
||||
@ -83,10 +84,8 @@ func (z *Writer) WithConcurrency(n int) *Writer {
|
||||
z.err = err
|
||||
}
|
||||
}
|
||||
if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
|
||||
// It is now safe to release the buffer as no longer in use by any goroutine.
|
||||
putBuffer(cap(res.data), res.data)
|
||||
}
|
||||
// It is now safe to release the buffer as no longer in use by any goroutine.
|
||||
putBuffer(cap(res.data), res.data)
|
||||
if h := z.OnBlockDone; h != nil {
|
||||
h(n)
|
||||
}
|
||||
@ -230,7 +229,12 @@ func (z *Writer) compressBlock(data []byte) error {
|
||||
if z.c != nil {
|
||||
c := make(chan zResult)
|
||||
z.c <- c // Send now to guarantee order
|
||||
go writerCompressBlock(c, z.Header, data)
|
||||
|
||||
// get a buffer from the pool and copy the data over
|
||||
block := getBuffer(z.Header.BlockMaxSize)[:len(data)]
|
||||
copy(block, data)
|
||||
|
||||
go writerCompressBlock(c, z.Header, block)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -298,7 +302,9 @@ func (z *Writer) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
data := z.data[:z.idx]
|
||||
data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])]
|
||||
copy(data, z.data[:z.idx])
|
||||
|
||||
z.idx = 0
|
||||
if z.c == nil {
|
||||
return z.compressBlock(data)
|
||||
@ -370,6 +376,10 @@ func (z *Writer) Reset(w io.Writer) {
|
||||
z.checksum.Reset()
|
||||
z.idx = 0
|
||||
z.err = nil
|
||||
// reset hashtable to ensure deterministic output.
|
||||
for i := range z.hashtable {
|
||||
z.hashtable[i] = 0
|
||||
}
|
||||
z.WithConcurrency(n)
|
||||
}
|
||||
|
||||
@ -397,9 +407,13 @@ func writerCompressBlock(c chan zResult, header Header, data []byte) {
|
||||
if zn > 0 && zn < len(data) {
|
||||
res.size = uint32(zn)
|
||||
res.data = zdata[:zn]
|
||||
// release the uncompressed block since it is not used anymore
|
||||
putBuffer(header.BlockMaxSize, data)
|
||||
} else {
|
||||
res.size = uint32(len(data)) | compressedBlockFlag
|
||||
res.data = data
|
||||
// release the compressed block since it was not used
|
||||
putBuffer(header.BlockMaxSize, zdata)
|
||||
}
|
||||
if header.BlockChecksum {
|
||||
res.checksum = xxh32.ChecksumZero(res.data)
|
||||
|
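For illustration only (not part of this commit): the Writer changes above copy input into pooled buffers before handing blocks to worker goroutines, which only comes into play when concurrency is enabled. A minimal standalone round trip through that path, assuming the vendored github.com/pierrec/lz4 import path.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	var compressed bytes.Buffer

	// WithConcurrency makes compressBlock dispatch blocks to goroutines,
	// which is the code path adjusted in this hunk.
	zw := lz4.NewWriter(&compressed).WithConcurrency(4)
	if _, err := io.Copy(zw, strings.NewReader(strings.Repeat("ceph-csi ", 1<<16))); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	compressedLen := compressed.Len()

	n, err := io.Copy(io.Discard, lz4.NewReader(&compressed))
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes from %d compressed bytes\n", n, compressedLen)
}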
182
vendor/github.com/pierrec/lz4/writer_legacy.go
generated
vendored
Normal file
182
vendor/github.com/pierrec/lz4/writer_legacy.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriterLegacy implements the LZ4Demo frame encoder.
|
||||
type WriterLegacy struct {
|
||||
Header
|
||||
// Handler called when a block has been successfully written.
|
||||
// It provides the number of bytes written.
|
||||
OnBlockDone func(size int)
|
||||
|
||||
dst io.Writer // Destination.
|
||||
data []byte // Data to be compressed + buffer for compressed data.
|
||||
idx int // Index into data.
|
||||
hashtable [winSize]int // Hash table used in CompressBlock().
|
||||
}
|
||||
|
||||
// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format.
|
||||
// No access to the underlying io.Writer is performed.
|
||||
// The supplied Header is checked at the first Write.
|
||||
// It is OK to change it before the first Write; after that, it may only be changed after a Reset().
|
||||
func NewWriterLegacy(dst io.Writer) *WriterLegacy {
|
||||
z := new(WriterLegacy)
|
||||
z.Reset(dst)
|
||||
return z
|
||||
}
|
||||
|
||||
// Write compresses data from the supplied buffer into the underlying io.Writer.
|
||||
// Write does not return until the data has been written.
|
||||
func (z *WriterLegacy) Write(buf []byte) (int, error) {
|
||||
if !z.Header.done {
|
||||
if err := z.writeHeader(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if debugFlag {
|
||||
debug("input buffer len=%d index=%d", len(buf), z.idx)
|
||||
}
|
||||
|
||||
zn := len(z.data)
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
if z.idx == 0 && len(buf) >= zn {
|
||||
// Avoid a copy as there is enough data for a block.
|
||||
if err := z.compressBlock(buf[:zn]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += zn
|
||||
buf = buf[zn:]
|
||||
continue
|
||||
}
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(z.data[z.idx:], buf)
|
||||
n += m
|
||||
z.idx += m
|
||||
buf = buf[m:]
|
||||
if debugFlag {
|
||||
debug("%d bytes copied to buf, current index %d", n, z.idx)
|
||||
}
|
||||
|
||||
if z.idx < len(z.data) {
|
||||
// Buffer not filled.
|
||||
if debugFlag {
|
||||
debug("need more data for compression")
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Buffer full.
|
||||
if err := z.compressBlock(z.data); err != nil {
|
||||
return n, err
|
||||
}
|
||||
z.idx = 0
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// writeHeader builds and writes the header to the underlying io.Writer.
|
||||
func (z *WriterLegacy) writeHeader() error {
|
||||
// Legacy frames have a fixed 8 MB block size
|
||||
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
|
||||
bSize := 2 * blockSize4M
|
||||
|
||||
buf := make([]byte, 2*bSize, 2*bSize)
|
||||
z.data = buf[:bSize] // Uncompressed buffer is the first half.
|
||||
|
||||
z.idx = 0
|
||||
|
||||
// The header consists of a single magic number; write it out.
|
||||
if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil {
|
||||
return err
|
||||
}
|
||||
z.Header.done = true
|
||||
if debugFlag {
|
||||
debug("wrote header %v", z.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// compressBlock compresses a block.
|
||||
func (z *WriterLegacy) compressBlock(data []byte) error {
|
||||
bSize := 2 * blockSize4M
|
||||
zdata := z.data[bSize:cap(z.data)]
|
||||
// The compressed block size cannot exceed the input's.
|
||||
var zn int
|
||||
|
||||
if level := z.Header.CompressionLevel; level != 0 {
|
||||
zn, _ = CompressBlockHC(data, zdata, level)
|
||||
} else {
|
||||
zn, _ = CompressBlock(data, zdata, z.hashtable[:])
|
||||
}
|
||||
|
||||
if debugFlag {
|
||||
debug("block compression %d => %d", len(data), zn)
|
||||
}
|
||||
zdata = zdata[:zn]
|
||||
|
||||
// Write the block.
|
||||
if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil {
|
||||
return err
|
||||
}
|
||||
written, err := z.dst.Write(zdata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if h := z.OnBlockDone; h != nil {
|
||||
h(written)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush flushes any pending compressed data to the underlying writer.
|
||||
// Flush does not return until the data has been written.
|
||||
// If the underlying writer returns an error, Flush returns that error.
|
||||
func (z *WriterLegacy) Flush() error {
|
||||
if debugFlag {
|
||||
debug("flush with index %d", z.idx)
|
||||
}
|
||||
if z.idx == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
data := z.data[:z.idx]
|
||||
z.idx = 0
|
||||
return z.compressBlock(data)
|
||||
}
|
||||
|
||||
// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
|
||||
func (z *WriterLegacy) Close() error {
|
||||
if !z.Header.done {
|
||||
if err := z.writeHeader(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := z.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if debugFlag {
|
||||
debug("writing last empty block")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reset clears the state of the WriterLegacy z such that it is equivalent to its
|
||||
// initial state from NewWriterLegacy, but instead writing to w.
|
||||
// No access to the underlying io.Writer is performed.
|
||||
func (z *WriterLegacy) Reset(w io.Writer) {
|
||||
z.Header.Reset()
|
||||
z.dst = w
|
||||
z.idx = 0
|
||||
// reset hashtable to ensure deterministic output.
|
||||
for i := range z.hashtable {
|
||||
z.hashtable[i] = 0
|
||||
}
|
||||
}
|
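For illustration only (not part of this commit): a standalone round trip through the legacy-frame types added in this vendor bump, again assuming the github.com/pierrec/lz4 import path. WriterLegacy produces the LZ4Demo framing that ReaderLegacy consumes.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	payload := strings.Repeat("legacy frame example ", 1024)

	var frame bytes.Buffer
	zw := lz4.NewWriterLegacy(&frame)
	if _, err := io.Copy(zw, strings.NewReader(payload)); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if _, err := io.Copy(&out, lz4.NewReaderLegacy(&frame)); err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", out.String() == payload)
}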
308
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
Normal file
308
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
|
||||
// Copyright 2015 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.14.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type HealthCheckResponse_ServingStatus int32
|
||||
|
||||
const (
|
||||
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
|
||||
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
|
||||
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
|
||||
HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
|
||||
)
|
||||
|
||||
// Enum value maps for HealthCheckResponse_ServingStatus.
|
||||
var (
|
||||
HealthCheckResponse_ServingStatus_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "SERVING",
|
||||
2: "NOT_SERVING",
|
||||
3: "SERVICE_UNKNOWN",
|
||||
}
|
||||
HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"SERVING": 1,
|
||||
"NOT_SERVING": 2,
|
||||
"SERVICE_UNKNOWN": 3,
|
||||
}
|
||||
)
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
|
||||
p := new(HealthCheckResponse_ServingStatus)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
|
||||
return &file_grpc_health_v1_health_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
|
||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
|
||||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HealthCheckRequest) Reset() {
|
||||
*x = HealthCheckRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HealthCheckRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthCheckRequest) ProtoMessage() {}
|
||||
|
||||
func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *HealthCheckRequest) GetService() string {
|
||||
if x != nil {
|
||||
return x.Service
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type HealthCheckResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HealthCheckResponse) Reset() {
|
||||
*x = HealthCheckResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HealthCheckResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthCheckResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return HealthCheckResponse_UNKNOWN
|
||||
}
|
||||
|
||||
var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_health_v1_health_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
|
||||
0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
|
||||
0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
|
||||
0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
|
||||
0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
|
||||
0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
|
||||
0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
|
||||
0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
|
||||
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
|
||||
0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
|
||||
0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
|
||||
0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
|
||||
0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
|
||||
0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
|
||||
0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
|
||||
0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
|
||||
0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
|
||||
0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
|
||||
0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_grpc_health_v1_health_proto_rawDescOnce sync.Once
|
||||
file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
|
||||
file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
|
||||
file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
|
||||
})
|
||||
return file_grpc_health_v1_health_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_grpc_health_v1_health_proto_goTypes = []interface{}{
|
||||
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
|
||||
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
|
||||
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
|
||||
}
|
||||
var file_grpc_health_v1_health_proto_depIdxs = []int32{
|
||||
0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
|
||||
1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
|
||||
1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
|
||||
2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
|
||||
2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
|
||||
3, // [3:5] is the sub-list for method output_type
|
||||
1, // [1:3] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_grpc_health_v1_health_proto_init() }
|
||||
func file_grpc_health_v1_health_proto_init() {
|
||||
if File_grpc_health_v1_health_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HealthCheckRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HealthCheckResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_grpc_health_v1_health_proto_goTypes,
|
||||
DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
|
||||
EnumInfos: file_grpc_health_v1_health_proto_enumTypes,
|
||||
MessageInfos: file_grpc_health_v1_health_proto_msgTypes,
|
||||
}.Build()
|
||||
File_grpc_health_v1_health_proto = out.File
|
||||
file_grpc_health_v1_health_proto_rawDesc = nil
|
||||
file_grpc_health_v1_health_proto_goTypes = nil
|
||||
file_grpc_health_v1_health_proto_depIdxs = nil
|
||||
}
|
218
vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
generated
vendored
Normal file
218
vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
// Copyright 2015 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
|
||||
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.14.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type HealthClient interface {
|
||||
// If the requested service is unknown, the call will fail with status
|
||||
// NOT_FOUND.
|
||||
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
||||
// Performs a watch for the serving status of the requested service.
|
||||
// The server will immediately send back a message indicating the current
|
||||
// serving status. It will then subsequently send a new message whenever
|
||||
// the service's serving status changes.
|
||||
//
|
||||
// If the requested service is unknown when the call is received, the
|
||||
// server will send a message setting the serving status to
|
||||
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||
// future point, the serving status of the service becomes known, the
|
||||
// server will send a new message with the service's serving status.
|
||||
//
|
||||
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||
// should assume this method is not supported and should not retry the
|
||||
// call. If the call terminates with any other status (including OK),
|
||||
// clients should retry the call with appropriate exponential backoff.
|
||||
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
|
||||
}
|
||||
|
||||
type healthClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
|
||||
return &healthClient{cc}
|
||||
}
|
||||
|
||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||
out := new(HealthCheckResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &healthWatchClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Health_WatchClient interface {
|
||||
Recv() (*HealthCheckResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type healthWatchClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
|
||||
m := new(HealthCheckResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// HealthServer is the server API for Health service.
|
||||
// All implementations should embed UnimplementedHealthServer
|
||||
// for forward compatibility
|
||||
type HealthServer interface {
|
||||
// If the requested service is unknown, the call will fail with status
|
||||
// NOT_FOUND.
|
||||
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
|
||||
// Performs a watch for the serving status of the requested service.
|
||||
// The server will immediately send back a message indicating the current
|
||||
// serving status. It will then subsequently send a new message whenever
|
||||
// the service's serving status changes.
|
||||
//
|
||||
// If the requested service is unknown when the call is received, the
|
||||
// server will send a message setting the serving status to
|
||||
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||
// future point, the serving status of the service becomes known, the
|
||||
// server will send a new message with the service's serving status.
|
||||
//
|
||||
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||
// should assume this method is not supported and should not retry the
|
||||
// call. If the call terminates with any other status (including OK),
|
||||
// clients should retry the call with appropriate exponential backoff.
|
||||
Watch(*HealthCheckRequest, Health_WatchServer) error
|
||||
}
|
||||
|
||||
// UnimplementedHealthServer should be embedded to have forward compatible implementations.
|
||||
type UnimplementedHealthServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
|
||||
}
|
||||
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
|
||||
}
|
||||
|
||||
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HealthServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeHealthServer interface {
|
||||
mustEmbedUnimplementedHealthServer()
|
||||
}
|
||||
|
||||
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
|
||||
s.RegisterService(&Health_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HealthCheckRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HealthServer).Check(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.health.v1.Health/Check",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(HealthCheckRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
|
||||
}
|
||||
|
||||
type Health_WatchServer interface {
|
||||
Send(*HealthCheckResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type healthWatchServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var Health_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.health.v1.Health",
|
||||
HandlerType: (*HealthServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Check",
|
||||
Handler: _Health_Check_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Watch",
|
||||
Handler: _Health_Watch_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc/health/v1/health.proto",
|
||||
}
|
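For illustration only (not part of this commit): a standalone sketch of wiring the generated Health service into a gRPC server. The health.NewServer helper used below lives in google.golang.org/grpc/health, outside this diff, and the service name is a placeholder.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer()
	hs := health.NewServer()
	// Clients can poll this status with Health.Check or stream changes
	// with Health.Watch, as documented on the generated interfaces above.
	hs.SetServingStatus("example.Service", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(srv, hs)

	log.Printf("health endpoint listening on %s", lis.Addr())
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}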
810
vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
generated
vendored
Normal file
810
vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
generated
vendored
Normal file
@ -0,0 +1,810 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/struct.proto
|
||||
|
||||
// Package structpb contains generated types for google/protobuf/struct.proto.
|
||||
//
|
||||
// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
|
||||
// used to represent arbitrary JSON. The Value message represents a JSON value,
|
||||
// the Struct message represents a JSON object, and the ListValue message
|
||||
// represents a JSON array. See https://json.org for more information.
|
||||
//
|
||||
// The Value, Struct, and ListValue types have generated MarshalJSON and
|
||||
// UnmarshalJSON methods such that they serialize JSON equivalent to what the
|
||||
// messages themselves represent. Use of these types with the
|
||||
// "google.golang.org/protobuf/encoding/protojson" package
|
||||
// ensures that they will be serialized as their JSON equivalent.
|
||||
//
|
||||
//
|
||||
// Conversion to and from a Go interface
|
||||
//
|
||||
// The standard Go "encoding/json" package has functionality to serialize
|
||||
// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
|
||||
// ListValue.AsSlice methods can convert the protobuf message representation into
|
||||
// a form represented by interface{}, map[string]interface{}, and []interface{}.
|
||||
// This form can be used with other packages that operate on such data structures
|
||||
// and also directly with the standard json package.
|
||||
//
|
||||
// In order to convert the interface{}, map[string]interface{}, and []interface{}
|
||||
// forms back as Value, Struct, and ListValue messages, use the NewStruct,
|
||||
// NewList, and NewValue constructor functions.
|
||||
//
|
||||
//
|
||||
// Example usage
|
||||
//
|
||||
// Consider the following example JSON object:
|
||||
//
|
||||
// {
|
||||
// "firstName": "John",
|
||||
// "lastName": "Smith",
|
||||
// "isAlive": true,
|
||||
// "age": 27,
|
||||
// "address": {
|
||||
// "streetAddress": "21 2nd Street",
|
||||
// "city": "New York",
|
||||
// "state": "NY",
|
||||
// "postalCode": "10021-3100"
|
||||
// },
|
||||
// "phoneNumbers": [
|
||||
// {
|
||||
// "type": "home",
|
||||
// "number": "212 555-1234"
|
||||
// },
|
||||
// {
|
||||
// "type": "office",
|
||||
// "number": "646 555-4567"
|
||||
// }
|
||||
// ],
|
||||
// "children": [],
|
||||
// "spouse": null
|
||||
// }
|
||||
//
|
||||
// To construct a Value message representing the above JSON object:
|
||||
//
|
||||
// m, err := structpb.NewValue(map[string]interface{}{
|
||||
// "firstName": "John",
|
||||
// "lastName": "Smith",
|
||||
// "isAlive": true,
|
||||
// "age": 27,
|
||||
// "address": map[string]interface{}{
|
||||
// "streetAddress": "21 2nd Street",
|
||||
// "city": "New York",
|
||||
// "state": "NY",
|
||||
// "postalCode": "10021-3100",
|
||||
// },
|
||||
// "phoneNumbers": []interface{}{
|
||||
// map[string]interface{}{
|
||||
// "type": "home",
|
||||
// "number": "212 555-1234",
|
||||
// },
|
||||
// map[string]interface{}{
|
||||
// "type": "office",
|
||||
// "number": "646 555-4567",
|
||||
// },
|
||||
// },
|
||||
// "children": []interface{}{},
|
||||
// "spouse": nil,
|
||||
// })
|
||||
// if err != nil {
|
||||
// ... // handle error
|
||||
// }
|
||||
// ... // make use of m as a *structpb.Value
|
||||
//
|
||||
package structpb
|
||||
|
||||
import (
|
||||
base64 "encoding/base64"
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
utf8 "unicode/utf8"
|
||||
)
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
type NullValue int32
|
||||
|
||||
const (
|
||||
// Null value.
|
||||
NullValue_NULL_VALUE NullValue = 0
|
||||
)
|
||||
|
||||
// Enum value maps for NullValue.
|
||||
var (
|
||||
NullValue_name = map[int32]string{
|
||||
0: "NULL_VALUE",
|
||||
}
|
||||
NullValue_value = map[string]int32{
|
||||
"NULL_VALUE": 0,
|
||||
}
|
||||
)
|
||||
|
||||
func (x NullValue) Enum() *NullValue {
|
||||
p := new(NullValue)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x NullValue) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (NullValue) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (NullValue) Type() protoreflect.EnumType {
|
||||
return &file_google_protobuf_struct_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x NullValue) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NullValue.Descriptor instead.
|
||||
func (NullValue) EnumDescriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

// Unordered map of dynamically typed values.
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

// NewStruct constructs a Struct from a general-purpose Go map.
// The map keys must be valid UTF-8.
// The map values are converted using NewValue.
func NewStruct(v map[string]interface{}) (*Struct, error) {
x := &Struct{Fields: make(map[string]*Value, len(v))}
for k, v := range v {
if !utf8.ValidString(k) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
}
var err error
x.Fields[k], err = NewValue(v)
if err != nil {
return nil, err
}
}
return x, nil
}

// AsMap converts x to a general-purpose Go map.
// The map values are converted by calling Value.AsInterface.
func (x *Struct) AsMap() map[string]interface{} {
vs := make(map[string]interface{})
for k, v := range x.GetFields() {
vs[k] = v.AsInterface()
}
return vs
}

func (x *Struct) MarshalJSON() ([]byte, error) {
return protojson.Marshal(x)
}

func (x *Struct) UnmarshalJSON(b []byte) error {
return protojson.Unmarshal(b, x)
}

func (x *Struct) Reset() {
*x = Struct{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Struct) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*Struct) ProtoMessage() {}

func (x *Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
func (*Struct) Descriptor() ([]byte, []int) {
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
}

func (x *Struct) GetFields() map[string]*Value {
if x != nil {
return x.Fields
}
return nil
}

// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of these
// variants; absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
type Value struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

// The kind of value.
//
// Types that are assignable to Kind:
// *Value_NullValue
// *Value_NumberValue
// *Value_StringValue
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
Kind isValue_Kind `protobuf_oneof:"kind"`
}

// NewValue constructs a Value from a general-purpose Go interface.
//
// ╔════════════════════════╤════════════════════════════════════════════╗
// ║ Go type                │ Conversion                                 ║
// ╠════════════════════════╪════════════════════════════════════════════╣
// ║ nil                    │ stored as NullValue                        ║
// ║ bool                   │ stored as BoolValue                        ║
// ║ int, int32, int64      │ stored as NumberValue                      ║
// ║ uint, uint32, uint64   │ stored as NumberValue                      ║
// ║ float32, float64       │ stored as NumberValue                      ║
// ║ string                 │ stored as StringValue; must be valid UTF-8 ║
// ║ []byte                 │ stored as StringValue; base64-encoded      ║
// ║ map[string]interface{} │ stored as StructValue                      ║
// ║ []interface{}          │ stored as ListValue                        ║
// ╚════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
func NewValue(v interface{}) (*Value, error) {
switch v := v.(type) {
case nil:
return NewNullValue(), nil
case bool:
return NewBoolValue(v), nil
case int:
return NewNumberValue(float64(v)), nil
case int32:
return NewNumberValue(float64(v)), nil
case int64:
return NewNumberValue(float64(v)), nil
case uint:
return NewNumberValue(float64(v)), nil
case uint32:
return NewNumberValue(float64(v)), nil
case uint64:
return NewNumberValue(float64(v)), nil
case float32:
return NewNumberValue(float64(v)), nil
case float64:
return NewNumberValue(float64(v)), nil
case string:
if !utf8.ValidString(v) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
}
return NewStringValue(v), nil
case []byte:
s := base64.StdEncoding.EncodeToString(v)
return NewStringValue(s), nil
case map[string]interface{}:
v2, err := NewStruct(v)
if err != nil {
return nil, err
}
return NewStructValue(v2), nil
case []interface{}:
v2, err := NewList(v)
if err != nil {
return nil, err
}
return NewListValue(v2), nil
default:
return nil, protoimpl.X.NewError("invalid type: %T", v)
}
}

// NewNullValue constructs a new null Value.
func NewNullValue() *Value {
return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
}

// NewBoolValue constructs a new boolean Value.
func NewBoolValue(v bool) *Value {
return &Value{Kind: &Value_BoolValue{BoolValue: v}}
}

// NewNumberValue constructs a new number Value.
func NewNumberValue(v float64) *Value {
return &Value{Kind: &Value_NumberValue{NumberValue: v}}
}

// NewStringValue constructs a new string Value.
func NewStringValue(v string) *Value {
return &Value{Kind: &Value_StringValue{StringValue: v}}
}

// NewStructValue constructs a new struct Value.
func NewStructValue(v *Struct) *Value {
return &Value{Kind: &Value_StructValue{StructValue: v}}
}

// NewListValue constructs a new list Value.
func NewListValue(v *ListValue) *Value {
return &Value{Kind: &Value_ListValue{ListValue: v}}
}

// AsInterface converts x to a general-purpose Go interface.
//
// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
// semantically equivalent JSON (assuming no errors occur).
//
// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
// converted as strings to remain compatible with MarshalJSON.
func (x *Value) AsInterface() interface{} {
switch v := x.GetKind().(type) {
case *Value_NumberValue:
if v != nil {
switch {
case math.IsNaN(v.NumberValue):
return "NaN"
case math.IsInf(v.NumberValue, +1):
return "Infinity"
case math.IsInf(v.NumberValue, -1):
return "-Infinity"
default:
return v.NumberValue
}
}
case *Value_StringValue:
if v != nil {
return v.StringValue
}
case *Value_BoolValue:
if v != nil {
return v.BoolValue
}
case *Value_StructValue:
if v != nil {
return v.StructValue.AsMap()
}
case *Value_ListValue:
if v != nil {
return v.ListValue.AsSlice()
}
}
return nil
}

func (x *Value) MarshalJSON() ([]byte, error) {
return protojson.Marshal(x)
}

func (x *Value) UnmarshalJSON(b []byte) error {
return protojson.Unmarshal(b, x)
}

func (x *Value) Reset() {
*x = Value{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Value) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*Value) ProtoMessage() {}

func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use Value.ProtoReflect.Descriptor instead.
func (*Value) Descriptor() ([]byte, []int) {
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
}

func (m *Value) GetKind() isValue_Kind {
if m != nil {
return m.Kind
}
return nil
}

func (x *Value) GetNullValue() NullValue {
if x, ok := x.GetKind().(*Value_NullValue); ok {
return x.NullValue
}
return NullValue_NULL_VALUE
}

func (x *Value) GetNumberValue() float64 {
if x, ok := x.GetKind().(*Value_NumberValue); ok {
return x.NumberValue
}
return 0
}

func (x *Value) GetStringValue() string {
if x, ok := x.GetKind().(*Value_StringValue); ok {
return x.StringValue
}
return ""
}

func (x *Value) GetBoolValue() bool {
if x, ok := x.GetKind().(*Value_BoolValue); ok {
return x.BoolValue
}
return false
}

func (x *Value) GetStructValue() *Struct {
if x, ok := x.GetKind().(*Value_StructValue); ok {
return x.StructValue
}
return nil
}

func (x *Value) GetListValue() *ListValue {
if x, ok := x.GetKind().(*Value_ListValue); ok {
return x.ListValue
}
return nil
}

type isValue_Kind interface {
isValue_Kind()
}

type Value_NullValue struct {
// Represents a null value.
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
}

type Value_NumberValue struct {
// Represents a double value.
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
}

type Value_StringValue struct {
// Represents a string value.
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
}

type Value_BoolValue struct {
// Represents a boolean value.
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

type Value_StructValue struct {
// Represents a structured value.
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
}

type Value_ListValue struct {
// Represents a repeated `Value`.
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
}

func (*Value_NullValue) isValue_Kind() {}

func (*Value_NumberValue) isValue_Kind() {}

func (*Value_StringValue) isValue_Kind() {}

func (*Value_BoolValue) isValue_Kind() {}

func (*Value_StructValue) isValue_Kind() {}

func (*Value_ListValue) isValue_Kind() {}

// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

// Repeated field of dynamically typed values.
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

// NewList constructs a ListValue from a general-purpose Go slice.
// The slice elements are converted using NewValue.
func NewList(v []interface{}) (*ListValue, error) {
x := &ListValue{Values: make([]*Value, len(v))}
for i, v := range v {
var err error
x.Values[i], err = NewValue(v)
if err != nil {
return nil, err
}
}
return x, nil
}

// AsSlice converts x to a general-purpose Go slice.
// The slice elements are converted by calling Value.AsInterface.
func (x *ListValue) AsSlice() []interface{} {
vs := make([]interface{}, len(x.GetValues()))
for i, v := range x.GetValues() {
vs[i] = v.AsInterface()
}
return vs
}

func (x *ListValue) MarshalJSON() ([]byte, error) {
return protojson.Marshal(x)
}

func (x *ListValue) UnmarshalJSON(b []byte) error {
return protojson.Unmarshal(b, x)
}

func (x *ListValue) Reset() {
*x = ListValue{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *ListValue) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*ListValue) ProtoMessage() {}

func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
func (*ListValue) Descriptor() ([]byte, []int) {
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
}

func (x *ListValue) GetValues() []*Value {
if x != nil {
return x.Values
}
return nil
}

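Editor's note: NewStruct, NewValue, NewList and the As* accessors above are the public surface of this vendored package. The following minimal sketch is illustrative only, is not part of the vendored file, and assumes nothing beyond the API shown above:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
    	// Build a Struct from an ordinary Go map; keys must be valid UTF-8,
    	// values are converted with NewValue per the conversion table above.
    	s, err := structpb.NewStruct(map[string]interface{}{
    		"name":     "demo",
    		"replicas": 3,
    		"tags":     []interface{}{"a", "b"},
    	})
    	if err != nil {
    		panic(err) // handle the error appropriately in real code
    	}

    	// AsMap converts back to a general-purpose map by calling
    	// Value.AsInterface on each field.
    	m := s.AsMap()
    	fmt.Println(m["replicas"]) // numbers round-trip as float64: 3
    }

Per the NewValue table, integers are stored as NumberValue (float64), so they come back as float64 rather than int after the round trip.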
var File_google_protobuf_struct_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_protobuf_struct_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
|
||||
0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
|
||||
0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
|
||||
0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
|
||||
0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
|
||||
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
|
||||
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
|
||||
0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
|
||||
0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
|
||||
0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
|
||||
0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
|
||||
0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
|
||||
0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
|
||||
0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
|
||||
0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
|
||||
0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
|
||||
0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
|
||||
0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
|
||||
0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
|
||||
0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
|
||||
0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
|
||||
0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
|
||||
0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_protobuf_struct_proto_rawDescOnce sync.Once
|
||||
file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
|
||||
file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
|
||||
file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
|
||||
})
|
||||
return file_google_protobuf_struct_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_google_protobuf_struct_proto_goTypes = []interface{}{
|
||||
(NullValue)(0), // 0: google.protobuf.NullValue
|
||||
(*Struct)(nil), // 1: google.protobuf.Struct
|
||||
(*Value)(nil), // 2: google.protobuf.Value
|
||||
(*ListValue)(nil), // 3: google.protobuf.ListValue
|
||||
nil, // 4: google.protobuf.Struct.FieldsEntry
|
||||
}
|
||||
var file_google_protobuf_struct_proto_depIdxs = []int32{
|
||||
4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
|
||||
0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
|
||||
1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
|
||||
3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
|
||||
2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
|
||||
2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_protobuf_struct_proto_init() }
|
||||
func file_google_protobuf_struct_proto_init() {
|
||||
if File_google_protobuf_struct_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Struct); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
|
||||
(*Value_NullValue)(nil),
|
||||
(*Value_NumberValue)(nil),
|
||||
(*Value_StringValue)(nil),
|
||||
(*Value_BoolValue)(nil),
|
||||
(*Value_StructValue)(nil),
|
||||
(*Value_ListValue)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_protobuf_struct_proto_goTypes,
|
||||
DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
|
||||
EnumInfos: file_google_protobuf_struct_proto_enumTypes,
|
||||
MessageInfos: file_google_protobuf_struct_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_protobuf_struct_proto = out.File
|
||||
file_google_protobuf_struct_proto_rawDesc = nil
|
||||
file_google_protobuf_struct_proto_goTypes = nil
|
||||
file_google_protobuf_struct_proto_depIdxs = nil
|
||||
}
|
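Editor's note: a second hedged sketch, also not part of the vendored file, showing the JSON round-trip behaviour documented above (MarshalJSON and UnmarshalJSON delegate to protojson):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
    	// Arbitrary JSON can be loaded straight into a structpb.Value.
    	var v structpb.Value
    	if err := v.UnmarshalJSON([]byte(`{"enabled": true, "limit": 10}`)); err != nil {
    		panic(err)
    	}

    	// MarshalJSON emits semantically equivalent JSON again.
    	b, err := v.MarshalJSON()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b))

    	// AsInterface yields map[string]interface{} for a struct-valued Value.
    	fmt.Printf("%T\n", v.AsInterface())
    }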
4
vendor/gopkg.in/square/go-jose.v2/.travis.yml
generated
vendored
4
vendor/gopkg.in/square/go-jose.v2/.travis.yml
generated
vendored
@ -8,8 +8,8 @@ matrix:
- go: tip

go:
- '1.11.x'
- '1.12.x'
- '1.14.x'
- '1.15.x'
- tip

go_import_path: gopkg.in/square/go-jose.v2

2
vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
generated
vendored
2
vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
generated
vendored
@ -150,7 +150,7 @@ func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
return hmac.Sum(nil)[:ctx.authtagBytes]
}

// resize ensures the the given slice has a capacity of at least n bytes.
// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n uint64) (head, tail []byte) {

1
vendor/gopkg.in/square/go-jose.v2/crypter.go
generated
vendored
1
vendor/gopkg.in/square/go-jose.v2/crypter.go
generated
vendored
@ -216,6 +216,7 @@ func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *Encrypter

if opts != nil {
encrypter.compressionAlg = opts.Compression
encrypter.extraHeaders = opts.ExtraHeaders
}

for _, recipient := range rcpts {

52
vendor/gopkg.in/square/go-jose.v2/json/decode.go
generated
vendored
52
vendor/gopkg.in/square/go-jose.v2/json/decode.go
generated
vendored
@ -13,6 +13,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"runtime"
"strconv"
@ -245,6 +246,18 @@ func isValidNumber(s string) bool {
return s == ""
}

type NumberUnmarshalType int

const (
// unmarshal a JSON number into an interface{} as a float64
UnmarshalFloat NumberUnmarshalType = iota
// unmarshal a JSON number into an interface{} as a `json.Number`
UnmarshalJSONNumber
// unmarshal a JSON number into an interface{} as a int64
// if value is an integer otherwise float64
UnmarshalIntOrFloat
)

// decodeState represents the state while decoding a JSON value.
type decodeState struct {
data []byte
@ -252,7 +265,7 @@ type decodeState struct {
scan scanner
nextscan scanner // for calls to nextValue
savedError error
useNumber bool
numberType NumberUnmarshalType
}

// errPhase is used for errors that should not happen unless
@ -723,17 +736,38 @@ func (d *decodeState) literal(v reflect.Value) {
d.literalStore(d.data[start:d.off], v, false)
}

// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
// convertNumber converts the number literal s to a float64, int64 or a Number
// depending on d.numberDecodeType.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
if d.useNumber {
switch d.numberType {

case UnmarshalJSONNumber:
return Number(s), nil
case UnmarshalIntOrFloat:
v, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return v, nil
}

// tries to parse integer number in scientific notation
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
}

// if it has no decimal value use int64
if fi, fd := math.Modf(f); fd == 0.0 {
return int64(fi), nil
}
return f, nil
default:
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
}
return f, nil
}
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
}
return f, nil

}

var numberType = reflect.TypeOf(Number(""))

7
vendor/gopkg.in/square/go-jose.v2/json/stream.go
generated
vendored
7
vendor/gopkg.in/square/go-jose.v2/json/stream.go
generated
vendored
@ -31,9 +31,14 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}

// Deprecated: Use `SetNumberType` instead
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
func (dec *Decoder) UseNumber() { dec.d.numberType = UnmarshalJSONNumber }

// SetNumberType causes the Decoder to unmarshal a number into an interface{} as a
// Number, float64 or int64 depending on `t` enum value.
func (dec *Decoder) SetNumberType(t NumberUnmarshalType) { dec.d.numberType = t }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.

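Editor's note: the hunks above add NumberUnmarshalType and Decoder.SetNumberType to the go-jose fork of encoding/json vendored by this bump. A minimal, hedged sketch of how a caller might use the new API, assuming the vendored import path gopkg.in/square/go-jose.v2/json and only the functions shown in this diff:

    package main

    import (
    	"bytes"
    	"fmt"

    	"gopkg.in/square/go-jose.v2/json"
    )

    func main() {
    	raw := []byte(`{"iat": 1516239022, "ratio": 0.5}`)

    	var claims map[string]interface{}
    	dec := json.NewDecoder(bytes.NewReader(raw))
    	// UnmarshalIntOrFloat keeps integral JSON numbers as int64 and
    	// falls back to float64 for fractional values.
    	dec.SetNumberType(json.UnmarshalIntOrFloat)
    	if err := dec.Decode(&claims); err != nil {
    		panic(err)
    	}

    	fmt.Printf("%T %T\n", claims["iat"], claims["ratio"]) // int64 float64
    }

The existing UseNumber call in jwt/builder.go maps onto the same mechanism via json.UnmarshalJSONNumber, as the builder.go hunk further below shows.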
6
vendor/gopkg.in/square/go-jose.v2/jwk.go
generated
vendored
6
vendor/gopkg.in/square/go-jose.v2/jwk.go
generated
vendored
@ -238,7 +238,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {

if certPub != nil && keyPub != nil {
if !reflect.DeepEqual(certPub, keyPub) {
return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields to not match")
return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields do not match")
}
}

@ -332,7 +332,7 @@ func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {

const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP",x":"%s"}`
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`

func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
coordLength := curveSize(curve)
@ -406,7 +406,7 @@ func (k *JSONWebKey) IsPublic() bool {
}
}

// Public creates JSONWebKey with corresponding publik key if JWK represents asymmetric private key.
// Public creates JSONWebKey with corresponding public key if JWK represents asymmetric private key.
func (k *JSONWebKey) Public() JSONWebKey {
if k.IsPublic() {
return *k

2
vendor/gopkg.in/square/go-jose.v2/jwt/builder.go
generated
vendored
2
vendor/gopkg.in/square/go-jose.v2/jwt/builder.go
generated
vendored
@ -143,7 +143,7 @@ func normalize(i interface{}) (map[string]interface{}, error) {
}

d := json.NewDecoder(bytes.NewReader(raw))
d.UseNumber()
d.SetNumberType(json.UnmarshalJSONNumber)

if err := d.Decode(&m); err != nil {
return nil, err

3
vendor/gopkg.in/square/go-jose.v2/jwt/claims.go
generated
vendored
3
vendor/gopkg.in/square/go-jose.v2/jwt/claims.go
generated
vendored
@ -36,8 +36,9 @@ type Claims struct {
}

// NumericDate represents date and time as the number of seconds since the
// epoch, including leap seconds. Non-integer values can be represented
// epoch, ignoring leap seconds. Non-integer values can be represented
// in the serialized format, but we round to the nearest second.
// See RFC7519 Section 2: https://tools.ietf.org/html/rfc7519#section-2
type NumericDate int64

// NewNumericDate constructs NumericDate from time.Time value.

10
vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go
generated
vendored
10
vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go
generated
vendored
@ -137,8 +137,14 @@ func ParseSignedAndEncrypted(s string) (*NestedJSONWebToken, error) {
}

func tryJWKS(headers []jose.Header, key interface{}) interface{} {
jwks, ok := key.(*jose.JSONWebKeySet)
if !ok {
var jwks jose.JSONWebKeySet

switch jwksType := key.(type) {
case *jose.JSONWebKeySet:
jwks = *jwksType
case jose.JSONWebKeySet:
jwks = jwksType
default:
return key
}

2
vendor/gopkg.in/square/go-jose.v2/opaque.go
generated
vendored
2
vendor/gopkg.in/square/go-jose.v2/opaque.go
generated
vendored
@ -17,7 +17,7 @@
package jose

// OpaqueSigner is an interface that supports signing payloads with opaque
// private key(s). Private key operations preformed by implementors may, for
// private key(s). Private key operations performed by implementers may, for
// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
// transparently to the user of this interface.
type OpaqueSigner interface {

6
vendor/gopkg.in/square/go-jose.v2/shared.go
generated
vendored
6
vendor/gopkg.in/square/go-jose.v2/shared.go
generated
vendored
@ -183,7 +183,7 @@ type Header struct {
// Unverified certificate chain parsed from x5c header.
certificates []*x509.Certificate

// Any headers not recognised above get unmarshaled
// Any headers not recognised above get unmarshalled
// from JSON in a generic manner and placed in this map.
ExtraHeaders map[HeaderKey]interface{}
}
@ -295,12 +295,12 @@ func (parsed rawHeader) getAPV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerAPV)
}

// getIV extracts parsed "iv" frpom the raw JSON.
// getIV extracts parsed "iv" from the raw JSON.
func (parsed rawHeader) getIV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerIV)
}

// getTag extracts parsed "tag" frpom the raw JSON.
// getTag extracts parsed "tag" from the raw JSON.
func (parsed rawHeader) getTag() (*byteBuffer, error) {
return parsed.getByteBuffer(headerTag)
}

32
vendor/modules.txt
vendored
32
vendor/modules.txt
vendored
@ -108,7 +108,7 @@ github.com/beorn7/perks/quantile
# github.com/blang/semver/v4 v4.0.0
## explicit; go 1.14
github.com/blang/semver/v4
# github.com/cenkalti/backoff/v3 v3.0.0
# github.com/cenkalti/backoff/v3 v3.2.2
## explicit; go 1.12
github.com/cenkalti/backoff/v3
# github.com/cenkalti/backoff/v4 v4.1.3
@ -167,12 +167,14 @@ github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.6.0
## explicit; go 1.12
github.com/evanphx/json-patch/v5
# github.com/fatih/color v1.9.0
# github.com/fatih/color v1.13.0
## explicit; go 1.13
github.com/fatih/color
# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/frankban/quicktest v1.13.0
## explicit; go 1.13
# github.com/fsnotify/fsnotify v1.6.0
## explicit; go 1.16
github.com/fsnotify/fsnotify
@ -205,8 +207,6 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/swag v0.22.3
## explicit; go 1.18
github.com/go-openapi/swag
# github.com/go-sql-driver/mysql v1.5.0
## explicit; go 1.10
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/proto
@ -264,8 +264,8 @@ github.com/grpc-ecosystem/go-grpc-middleware
# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
## explicit
github.com/grpc-ecosystem/go-grpc-prometheus
# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0
## explicit; go 1.14
# github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3
## explicit; go 1.17
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@ -275,7 +275,7 @@ github.com/hashicorp/errwrap
# github.com/hashicorp/go-cleanhttp v0.5.2
## explicit; go 1.13
github.com/hashicorp/go-cleanhttp
# github.com/hashicorp/go-hclog v0.16.2
# github.com/hashicorp/go-hclog v1.1.0
## explicit; go 1.13
github.com/hashicorp/go-hclog
# github.com/hashicorp/go-multierror v1.1.1
@ -296,8 +296,8 @@ github.com/hashicorp/go-secure-stdlib/strutil
# github.com/hashicorp/go-sockaddr v1.0.2
## explicit
github.com/hashicorp/go-sockaddr
# github.com/hashicorp/hcl v1.0.0
## explicit
# github.com/hashicorp/hcl v1.0.1-vault-3
## explicit; go 1.15
github.com/hashicorp/hcl
github.com/hashicorp/hcl/hcl/ast
github.com/hashicorp/hcl/hcl/parser
@ -307,8 +307,8 @@ github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
# github.com/hashicorp/vault v1.4.2
## explicit; go 1.13
# github.com/hashicorp/vault v1.9.9
## explicit; go 1.17
github.com/hashicorp/vault/command/agent/auth
github.com/hashicorp/vault/command/agent/auth/kubernetes
# github.com/hashicorp/vault/api v1.9.0
@ -333,8 +333,6 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/kr/pretty v0.2.1
## explicit; go 1.12
# github.com/kubernetes-csi/csi-lib-utils v0.13.0
## explicit; go 1.18
github.com/kubernetes-csi/csi-lib-utils/connection
@ -426,7 +424,7 @@ github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openshift/api v0.0.0-20210927171657-636513e97fda
## explicit; go 1.16
github.com/openshift/api/security/v1
# github.com/pierrec/lz4 v2.5.2+incompatible
# github.com/pierrec/lz4 v2.6.1+incompatible
## explicit
github.com/pierrec/lz4
github.com/pierrec/lz4/internal/xxh32
@ -619,6 +617,8 @@ golang.org/x/time/rate
# gomodules.xyz/jsonpatch/v2 v2.2.0 => github.com/gomodules/jsonpatch/v2 v2.2.0
## explicit; go 1.12
gomodules.xyz/jsonpatch/v2
# google.golang.org/api v0.103.0
## explicit; go 1.19
# google.golang.org/appengine v1.6.7
## explicit; go 1.11
google.golang.org/appengine/internal
@ -653,6 +653,7 @@ google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancer/gracefulswitch
@ -719,12 +720,13 @@ google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
# gopkg.in/square/go-jose.v2 v2.5.1
# gopkg.in/square/go-jose.v2 v2.6.0
## explicit
gopkg.in/square/go-jose.v2
gopkg.in/square/go-jose.v2/cipher