Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00.
rebase: update github.com/libopenstorage/secrets to latest
With this update, we no longer import github.com/hashicorp/vault, which is now under the BSL license (https://github.com/hashicorp/vault/blob/main/LICENSE).

Resolves: #4196

Signed-off-by: Rakshith R <rar@redhat.com>
vendor/github.com/hashicorp/go-hclog/.gitignore (generated, vendored): 1 line deleted
@@ -1 +0,0 @@
.idea*
vendor/github.com/hashicorp/go-hclog/LICENSE (generated, vendored): 21 lines deleted
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2017 HashiCorp

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/hashicorp/go-hclog/README.md (generated, vendored): 148 lines deleted
@@ -1,148 +0,0 @@
# go-hclog

[][godocs]

[godocs]: https://godoc.org/github.com/hashicorp/go-hclog

`go-hclog` is a package for Go that provides a simple key/value logging
interface for use in development and production environments.

It provides logging levels that provide decreased output based upon the
desired amount of output, unlike the standard library `log` package.

It provides `Printf` style logging of values via `hclog.Fmt()`.

It provides a human readable output mode for use in development as well as
JSON output mode for production.

## Stability Note

This library has reached 1.0 stability. Its API can be considered solidified
and promised through future versions.

## Installation and Docs

Install using `go get github.com/hashicorp/go-hclog`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/go-hclog

## Usage

### Use the global logger

```go
hclog.Default().Info("hello world")
```

```text
2017-07-05T16:15:55.167-0700 [INFO ] hello world
```

(Note timestamps are removed in future examples for brevity.)

### Create a new logger

```go
appLogger := hclog.New(&hclog.LoggerOptions{
  Name:  "my-app",
  Level: hclog.LevelFromString("DEBUG"),
})
```

### Emit an Info level message with 2 key/value pairs

```go
input := "5.5"
_, err := strconv.ParseInt(input, 10, 32)
if err != nil {
  appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
}
```

```text
... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
```

### Create a new Logger for a major subsystem

```go
subsystemLogger := appLogger.Named("transport")
subsystemLogger.Info("we are transporting something")
```

```text
... [INFO ] my-app.transport: we are transporting something
```

Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
reflecting both the application and subsystem names.

### Create a new Logger with fixed key/value pairs

Using `With()` will include a specific key-value pair in all messages emitted
by that logger.

```go
requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
requestLogger := subsystemLogger.With("request", requestID)
requestLogger.Info("we are transporting a request")
```

```text
... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
```

This allows sub Loggers to be context specific without having to thread that
into all the callers.

### Using `hclog.Fmt()`

```go
totalBandwidth := 200
appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
```

```text
... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
```

### Use this with code that uses the standard library logger

If you want to use the standard library's `log.Logger` interface you can wrap
`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
it with the familiar `Println()`, `Printf()`, etc. For example:

```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
  InferLevels: true,
})
// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
stdLogger.Printf("[DEBUG] %+v", stdLogger)
```

```text
... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
```

Alternatively, you may configure the system-wide logger:

```go
// log the standard logger from 'import "log"'
log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
log.SetPrefix("")
log.SetFlags(0)

log.Printf("[DEBUG] %d", 42)
```

```text
... [DEBUG] my-app: 42
```

Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.

If the log lines start with a timestamp you can use the
`InferLevelsWithTimestamp` option to try and ignore them.
vendor/github.com/hashicorp/go-hclog/colorize_unix.go (generated, vendored): 29 lines deleted
@@ -1,29 +0,0 @@
//go:build !windows
// +build !windows

package hclog

import (
	"github.com/mattn/go-isatty"
)

// setColorization will mutate the values of this logger
// to appropriately configure colorization options. It provides
// a wrapper to the output stream on Windows systems.
func (l *intLogger) setColorization(opts *LoggerOptions) {
	switch opts.Color {
	case ColorOff:
		fallthrough
	case ForceColor:
		return
	case AutoColor:
		fi := l.checkWriterIsFile()
		isUnixTerm := isatty.IsTerminal(fi.Fd())
		isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
		isTerm := isUnixTerm || isCygwinTerm
		if !isTerm {
			l.headerColor = ColorOff
			l.writer.color = ColorOff
		}
	}
}
vendor/github.com/hashicorp/go-hclog/colorize_windows.go (generated, vendored): 38 lines deleted
@@ -1,38 +0,0 @@
//go:build windows
// +build windows

package hclog

import (
	"os"

	colorable "github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

// setColorization will mutate the values of this logger
// to appropriately configure colorization options. It provides
// a wrapper to the output stream on Windows systems.
func (l *intLogger) setColorization(opts *LoggerOptions) {
	switch opts.Color {
	case ColorOff:
		return
	case ForceColor:
		fi := l.checkWriterIsFile()
		l.writer.w = colorable.NewColorable(fi)
	case AutoColor:
		fi := l.checkWriterIsFile()
		isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
		isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
		isTerm := isUnixTerm || isCygwinTerm
		if !isTerm {
			l.writer.color = ColorOff
			l.headerColor = ColorOff
			return
		}

		if l.headerColor == ColorOff {
			l.writer.w = colorable.NewColorable(fi)
		}
	}
}
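To make the colorization options concrete, here is a minimal sketch (not part of this commit) of a caller enabling `AutoColor`; the option and field names come from the vendored sources above, while the `main` wrapper and the logger name are illustrative assumptions.

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// AutoColor enables colored output only when the writer is a terminal;
	// on Windows the writer is additionally wrapped by go-colorable.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "color-demo",        // illustrative name
		Output: os.Stderr,           // must be an *os.File for colorization
		Color:  hclog.AutoColor,
	})

	logger.Info("colored when attached to a terminal")
}
```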
vendor/github.com/hashicorp/go-hclog/context.go (generated, vendored): 38 lines deleted
@@ -1,38 +0,0 @@
package hclog

import (
	"context"
)

// WithContext inserts a logger into the context and is retrievable
// with FromContext. The optional args can be set with the same syntax as
// Logger.With to set fields on the inserted logger. This will not modify
// the logger argument in-place.
func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
	// While we could call logger.With even with zero args, we have this
	// check to avoid unnecessary allocations around creating a copy of a
	// logger.
	if len(args) > 0 {
		logger = logger.With(args...)
	}

	return context.WithValue(ctx, contextKey, logger)
}

// FromContext returns a logger from the context. This will return L()
// (the default logger) if no logger is found in the context. Therefore,
// this will never return a nil value.
func FromContext(ctx context.Context) Logger {
	logger, _ := ctx.Value(contextKey).(Logger)
	if logger == nil {
		return L()
	}

	return logger
}

// Unexported new type so that our context key never collides with another.
type contextKeyType struct{}

// contextKey is the key used for the context to store the logger.
var contextKey = contextKeyType{}
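As a hedged illustration of the `WithContext`/`FromContext` helpers defined above (not part of this diff; the `main` wrapper and key names are assumptions):

```go
package main

import (
	"context"

	"github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{Name: "my-app"})

	// Attach the logger (plus an extra key/value pair) to a context...
	ctx := hclog.WithContext(context.Background(), appLogger, "request_id", "42")

	// ...and retrieve it later; FromContext falls back to hclog.L() when
	// no logger was stored, so the result is never nil.
	hclog.FromContext(ctx).Info("handling request")
}
```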
vendor/github.com/hashicorp/go-hclog/exclude.go (generated, vendored): 71 lines deleted
@@ -1,71 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExcludeByMessage provides a simple way to build a list of log messages that
|
||||
// can be queried and matched. This is meant to be used with the Exclude
|
||||
// option on Options to suppress log messages. This does not hold any mutexes
|
||||
// within itself, so normal usage would be to Add entries at setup and none after
|
||||
// Exclude is going to be called. Exclude is called with a mutex held within
|
||||
// the Logger, so that doesn't need to use a mutex. Example usage:
|
||||
//
|
||||
// f := new(ExcludeByMessage)
|
||||
// f.Add("Noisy log message text")
|
||||
// appLogger.Exclude = f.Exclude
|
||||
type ExcludeByMessage struct {
|
||||
messages map[string]struct{}
|
||||
}
|
||||
|
||||
// Add a message to be filtered. Do not call this after Exclude has begun to be called,
|
||||
// due to concurrency issues.
|
||||
func (f *ExcludeByMessage) Add(msg string) {
|
||||
if f.messages == nil {
|
||||
f.messages = make(map[string]struct{})
|
||||
}
|
||||
|
||||
f.messages[msg] = struct{}{}
|
||||
}
|
||||
|
||||
// Return true if the given message should be excluded (filtered out)
|
||||
func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
|
||||
_, ok := f.messages[msg]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
|
||||
type ExcludeByPrefix string
|
||||
|
||||
// Matches a message that starts with the prefix.
|
||||
func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
|
||||
return strings.HasPrefix(msg, string(p))
|
||||
}
|
||||
|
||||
// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
|
||||
// the log entry is excluded.
|
||||
type ExcludeByRegexp struct {
|
||||
Regexp *regexp.Regexp
|
||||
}
|
||||
|
||||
// Exclude the log message if the message string matches the regexp
|
||||
func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
|
||||
return e.Regexp.MatchString(msg)
|
||||
}
|
||||
|
||||
// ExcludeFuncs is a slice of functions that will be called to see if a log entry
|
||||
// should be filtered or not. It stops calling functions once at least one returns
|
||||
// true.
|
||||
type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
|
||||
|
||||
// Calls each function until one of them returns true
|
||||
func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
|
||||
for _, f := range ff {
|
||||
if f(level, msg, args...) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
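The exclusion helpers above plug into `LoggerOptions.Exclude`. A minimal sketch of that wiring (not part of the commit; the prefix and logger name are assumed for illustration):

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Suppress any message carrying a known-noisy prefix. ExcludeByMessage
	// and ExcludeByRegexp can be plugged in the same way.
	filter := hclog.ExcludeByPrefix("health check")

	logger := hclog.New(&hclog.LoggerOptions{
		Name:    "my-app",
		Exclude: filter.Exclude,
	})

	logger.Info("health check passed") // dropped by the filter
	logger.Info("serving traffic")     // emitted normally
}
```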
vendor/github.com/hashicorp/go-hclog/global.go (generated, vendored): 64 lines deleted
@@ -1,64 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
protect sync.Once
|
||||
def Logger
|
||||
|
||||
// DefaultOptions is used to create the Default logger. These are read
|
||||
// only when the Default logger is created, so set them as soon as the
|
||||
// process starts.
|
||||
DefaultOptions = &LoggerOptions{
|
||||
Level: DefaultLevel,
|
||||
Output: DefaultOutput,
|
||||
TimeFn: time.Now,
|
||||
}
|
||||
)
|
||||
|
||||
// Default returns a globally held logger. This can be a good starting
|
||||
// place, and then you can use .With() and .Named() to create sub-loggers
|
||||
// to be used in more specific contexts.
|
||||
// The value of the Default logger can be set via SetDefault() or by
|
||||
// changing the options in DefaultOptions.
|
||||
//
|
||||
// This method is goroutine safe, returning a global from memory, but
|
||||
// care should be used if SetDefault() is called at random times
|
||||
// in the program as that may result in race conditions and an unexpected
|
||||
// Logger being returned.
|
||||
func Default() Logger {
|
||||
protect.Do(func() {
|
||||
// If SetDefault was used before Default() was called, we need to
|
||||
// detect that here.
|
||||
if def == nil {
|
||||
def = New(DefaultOptions)
|
||||
}
|
||||
})
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// L is a short alias for Default().
|
||||
func L() Logger {
|
||||
return Default()
|
||||
}
|
||||
|
||||
// SetDefault changes the logger to be returned by Default() and L()
|
||||
// to the one given. This allows packages to use the default logger
|
||||
// and have higher level packages change it to match the execution
|
||||
// environment. It returns any old default if there is one.
|
||||
//
|
||||
// NOTE: This is expected to be called early in the program to setup
|
||||
// a default logger. As such, it does not attempt to make itself
|
||||
// not racy with regard to the value of the default logger. Ergo
|
||||
// if it is called in goroutines, you may experience race conditions
|
||||
// with other goroutines retrieving the default logger. Basically,
|
||||
// don't do that.
|
||||
func SetDefault(log Logger) Logger {
|
||||
old := def
|
||||
def = log
|
||||
return old
|
||||
}
|
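A short, illustrative sketch of the global-logger API described above (the logger name and level are assumptions, not part of the commit):

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Replace the process-wide logger early in startup, before any
	// goroutines call Default()/L(); SetDefault is not made race-safe.
	hclog.SetDefault(hclog.New(&hclog.LoggerOptions{
		Name:  "my-app",
		Level: hclog.Debug,
	}))

	hclog.L().Debug("using the package-level default logger")
}
```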
vendor/github.com/hashicorp/go-hclog/interceptlogger.go (generated, vendored): 204 lines deleted
@@ -1,204 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var _ Logger = &interceptLogger{}
|
||||
|
||||
type interceptLogger struct {
|
||||
Logger
|
||||
|
||||
mu *sync.Mutex
|
||||
sinkCount *int32
|
||||
Sinks map[SinkAdapter]struct{}
|
||||
}
|
||||
|
||||
func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
|
||||
l := newLogger(opts)
|
||||
if l.callerOffset > 0 {
|
||||
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
|
||||
l.callerOffset += 2
|
||||
}
|
||||
intercept := &interceptLogger{
|
||||
Logger: l,
|
||||
mu: new(sync.Mutex),
|
||||
sinkCount: new(int32),
|
||||
Sinks: make(map[SinkAdapter]struct{}),
|
||||
}
|
||||
|
||||
atomic.StoreInt32(intercept.sinkCount, 0)
|
||||
|
||||
return intercept
|
||||
}
|
||||
|
||||
func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
|
||||
i.log(level, msg, args...)
|
||||
}
|
||||
|
||||
// log is used to make the caller stack frame lookup consistent. If Warn,Info,etc
|
||||
// all called Log then direct calls to Log would have a different stack frame
|
||||
// depth. By having all the methods call the same helper we ensure the stack
|
||||
// frame depth is the same.
|
||||
func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
|
||||
i.Logger.Log(level, msg, args...)
|
||||
if atomic.LoadInt32(i.sinkCount) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
for s := range i.Sinks {
|
||||
s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// Emit the message and args at TRACE level to log and sinks
|
||||
func (i *interceptLogger) Trace(msg string, args ...interface{}) {
|
||||
i.log(Trace, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at DEBUG level to log and sinks
|
||||
func (i *interceptLogger) Debug(msg string, args ...interface{}) {
|
||||
i.log(Debug, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at INFO level to log and sinks
|
||||
func (i *interceptLogger) Info(msg string, args ...interface{}) {
|
||||
i.log(Info, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at WARN level to log and sinks
|
||||
func (i *interceptLogger) Warn(msg string, args ...interface{}) {
|
||||
i.log(Warn, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at ERROR level to log and sinks
|
||||
func (i *interceptLogger) Error(msg string, args ...interface{}) {
|
||||
i.log(Error, msg, args...)
|
||||
}
|
||||
|
||||
func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
|
||||
top := i.Logger.ImpliedArgs()
|
||||
|
||||
cp := make([]interface{}, len(top)+len(args))
|
||||
copy(cp, top)
|
||||
copy(cp[len(top):], args)
|
||||
|
||||
return cp
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with a name descending from the current name.
|
||||
// This is used to create a subsystem specific Logger.
|
||||
// Registered sinks will subscribe to these messages as well.
|
||||
func (i *interceptLogger) Named(name string) Logger {
|
||||
return i.NamedIntercept(name)
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with an explicit name. This ignores the current
|
||||
// name. This is used to create a standalone logger that doesn't fall
|
||||
// within the normal hierarchy. Registered sinks will subscribe
|
||||
// to these messages as well.
|
||||
func (i *interceptLogger) ResetNamed(name string) Logger {
|
||||
return i.ResetNamedIntercept(name)
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with a name descending from the current name.
|
||||
// This is used to create a subsystem specific Logger.
|
||||
// Registered sinks will subscribe to these messages as well.
|
||||
func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
|
||||
var sub interceptLogger
|
||||
|
||||
sub = *i
|
||||
sub.Logger = i.Logger.Named(name)
|
||||
return &sub
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with an explicit name. This ignores the current
|
||||
// name. This is used to create a standalone logger that doesn't fall
|
||||
// within the normal hierarchy. Registered sinks will subscribe
|
||||
// to these messages as well.
|
||||
func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
|
||||
var sub interceptLogger
|
||||
|
||||
sub = *i
|
||||
sub.Logger = i.Logger.ResetNamed(name)
|
||||
return &sub
|
||||
}
|
||||
|
||||
// Return a sub-Logger for which every emitted log message will contain
|
||||
// the given key/value pairs. This is used to create a context specific
|
||||
// Logger.
|
||||
func (i *interceptLogger) With(args ...interface{}) Logger {
|
||||
var sub interceptLogger
|
||||
|
||||
sub = *i
|
||||
|
||||
sub.Logger = i.Logger.With(args...)
|
||||
|
||||
return &sub
|
||||
}
|
||||
|
||||
// RegisterSink attaches a SinkAdapter to interceptLoggers sinks.
|
||||
func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
|
||||
i.Sinks[sink] = struct{}{}
|
||||
|
||||
atomic.AddInt32(i.sinkCount, 1)
|
||||
}
|
||||
|
||||
// DeregisterSink removes a SinkAdapter from interceptLoggers sinks.
|
||||
func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
|
||||
delete(i.Sinks, sink)
|
||||
|
||||
atomic.AddInt32(i.sinkCount, -1)
|
||||
}
|
||||
|
||||
func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
|
||||
return i.StandardLogger(opts)
|
||||
}
|
||||
|
||||
func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
|
||||
if opts == nil {
|
||||
opts = &StandardLoggerOptions{}
|
||||
}
|
||||
|
||||
return log.New(i.StandardWriter(opts), "", 0)
|
||||
}
|
||||
|
||||
func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
|
||||
return i.StandardWriter(opts)
|
||||
}
|
||||
|
||||
func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
|
||||
return &stdlogAdapter{
|
||||
log: i,
|
||||
inferLevels: opts.InferLevels,
|
||||
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
|
||||
forceLevel: opts.ForceLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
|
||||
if or, ok := i.Logger.(OutputResettable); ok {
|
||||
return or.ResetOutput(opts)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
|
||||
if or, ok := i.Logger.(OutputResettable); ok {
|
||||
return or.ResetOutputWithFlush(opts, flushable)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
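For orientation, a minimal sketch of how an `InterceptLogger` and a `SinkAdapter` are typically wired together, using only the constructors and methods shown in this file and in intlogger.go below; the names, levels, and `main` wrapper are illustrative assumptions:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	root := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:  "my-app",
		Level: hclog.Info,
	})

	// A SinkAdapter receives messages logged through the InterceptLogger
	// (and its Named sub-loggers) and applies its own level and output.
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Level:  hclog.Trace,
		Output: os.Stderr,
	})
	root.RegisterSink(sink)
	defer root.DeregisterSink(sink)

	root.Named("transport").Info("sent to the root output and to the sink")
}
```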
vendor/github.com/hashicorp/go-hclog/intlogger.go (generated, vendored): 776 lines deleted
@@ -1,776 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
// TimeFormat is the time format to use for plain (non-JSON) output.
|
||||
// This is a version of RFC3339 that contains millisecond precision.
|
||||
const TimeFormat = "2006-01-02T15:04:05.000Z0700"
|
||||
|
||||
// TimeFormatJSON is the time format to use for JSON output.
|
||||
// This is a version of RFC3339 that contains microsecond precision.
|
||||
const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00"
|
||||
|
||||
// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json
|
||||
const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
|
||||
|
||||
var (
|
||||
_levelToBracket = map[Level]string{
|
||||
Debug: "[DEBUG]",
|
||||
Trace: "[TRACE]",
|
||||
Info: "[INFO] ",
|
||||
Warn: "[WARN] ",
|
||||
Error: "[ERROR]",
|
||||
}
|
||||
|
||||
_levelToColor = map[Level]*color.Color{
|
||||
Debug: color.New(color.FgHiWhite),
|
||||
Trace: color.New(color.FgHiGreen),
|
||||
Info: color.New(color.FgHiBlue),
|
||||
Warn: color.New(color.FgHiYellow),
|
||||
Error: color.New(color.FgHiRed),
|
||||
}
|
||||
)
|
||||
|
||||
// Make sure that intLogger is a Logger
|
||||
var _ Logger = &intLogger{}
|
||||
|
||||
// intLogger is an internal logger implementation. Internal in that it is
|
||||
// defined entirely by this package.
|
||||
type intLogger struct {
|
||||
json bool
|
||||
callerOffset int
|
||||
name string
|
||||
timeFormat string
|
||||
timeFn TimeFunction
|
||||
disableTime bool
|
||||
|
||||
// This is an interface so that it's shared by any derived loggers, since
|
||||
// those derived loggers share the bufio.Writer as well.
|
||||
mutex Locker
|
||||
writer *writer
|
||||
level *int32
|
||||
|
||||
headerColor ColorOption
|
||||
|
||||
implied []interface{}
|
||||
|
||||
exclude func(level Level, msg string, args ...interface{}) bool
|
||||
|
||||
// create subloggers with their own level setting
|
||||
independentLevels bool
|
||||
}
|
||||
|
||||
// New returns a configured logger.
|
||||
func New(opts *LoggerOptions) Logger {
|
||||
return newLogger(opts)
|
||||
}
|
||||
|
||||
// NewSinkAdapter returns a SinkAdapter with configured settings
|
||||
// defined by LoggerOptions
|
||||
func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
|
||||
l := newLogger(opts)
|
||||
if l.callerOffset > 0 {
|
||||
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
|
||||
l.callerOffset += 2
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func newLogger(opts *LoggerOptions) *intLogger {
|
||||
if opts == nil {
|
||||
opts = &LoggerOptions{}
|
||||
}
|
||||
|
||||
output := opts.Output
|
||||
if output == nil {
|
||||
output = DefaultOutput
|
||||
}
|
||||
|
||||
level := opts.Level
|
||||
if level == NoLevel {
|
||||
level = DefaultLevel
|
||||
}
|
||||
|
||||
mutex := opts.Mutex
|
||||
if mutex == nil {
|
||||
mutex = new(sync.Mutex)
|
||||
}
|
||||
|
||||
var primaryColor, headerColor ColorOption
|
||||
|
||||
if opts.ColorHeaderOnly {
|
||||
primaryColor = ColorOff
|
||||
headerColor = opts.Color
|
||||
} else {
|
||||
primaryColor = opts.Color
|
||||
headerColor = ColorOff
|
||||
}
|
||||
|
||||
l := &intLogger{
|
||||
json: opts.JSONFormat,
|
||||
name: opts.Name,
|
||||
timeFormat: TimeFormat,
|
||||
timeFn: time.Now,
|
||||
disableTime: opts.DisableTime,
|
||||
mutex: mutex,
|
||||
writer: newWriter(output, primaryColor),
|
||||
level: new(int32),
|
||||
exclude: opts.Exclude,
|
||||
independentLevels: opts.IndependentLevels,
|
||||
headerColor: headerColor,
|
||||
}
|
||||
if opts.IncludeLocation {
|
||||
l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
|
||||
}
|
||||
|
||||
if l.json {
|
||||
l.timeFormat = TimeFormatJSON
|
||||
}
|
||||
if opts.TimeFn != nil {
|
||||
l.timeFn = opts.TimeFn
|
||||
}
|
||||
if opts.TimeFormat != "" {
|
||||
l.timeFormat = opts.TimeFormat
|
||||
}
|
||||
|
||||
l.setColorization(opts)
|
||||
|
||||
atomic.StoreInt32(l.level, int32(level))
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// offsetIntLogger is the stack frame offset in the call stack for the caller to
|
||||
// one of the Warn, Info, Log, etc methods.
|
||||
const offsetIntLogger = 3
|
||||
|
||||
// Log a message and a set of key/value pairs if the given level is at
|
||||
// or more severe than the threshold configured in the Logger.
|
||||
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
|
||||
if level < Level(atomic.LoadInt32(l.level)) {
|
||||
return
|
||||
}
|
||||
|
||||
t := l.timeFn()
|
||||
|
||||
l.mutex.Lock()
|
||||
defer l.mutex.Unlock()
|
||||
|
||||
if l.exclude != nil && l.exclude(level, msg, args...) {
|
||||
return
|
||||
}
|
||||
|
||||
if l.json {
|
||||
l.logJSON(t, name, level, msg, args...)
|
||||
} else {
|
||||
l.logPlain(t, name, level, msg, args...)
|
||||
}
|
||||
|
||||
l.writer.Flush(level)
|
||||
}
|
||||
|
||||
// Cleanup a path by returning the last 2 segments of the path only.
|
||||
func trimCallerPath(path string) string {
|
||||
// lovely borrowed from zap
|
||||
// nb. To make sure we trim the path correctly on Windows too, we
|
||||
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
|
||||
// because the path given originates from Go stdlib, specifically
|
||||
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
|
||||
// Windows.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/3335
|
||||
// and https://github.com/golang/go/issues/18151
|
||||
//
|
||||
// for discussion on the issue on Go side.
|
||||
|
||||
// Find the last separator.
|
||||
idx := strings.LastIndexByte(path, '/')
|
||||
if idx == -1 {
|
||||
return path
|
||||
}
|
||||
|
||||
// Find the penultimate separator.
|
||||
idx = strings.LastIndexByte(path[:idx], '/')
|
||||
if idx == -1 {
|
||||
return path
|
||||
}
|
||||
|
||||
return path[idx+1:]
|
||||
}
|
||||
|
||||
// isNormal indicates if the rune is one allowed to exist as an unquoted
|
||||
// string value. This is a subset of ASCII, `-` through `~`.
|
||||
func isNormal(r rune) bool {
|
||||
return 0x2D <= r && r <= 0x7E // - through ~
|
||||
}
|
||||
|
||||
// needsQuoting returns false if all the runes in string are normal, according
|
||||
// to isNormal
|
||||
func needsQuoting(str string) bool {
|
||||
for _, r := range str {
|
||||
if !isNormal(r) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Non-JSON logging format function
|
||||
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
|
||||
|
||||
if !l.disableTime {
|
||||
l.writer.WriteString(t.Format(l.timeFormat))
|
||||
l.writer.WriteByte(' ')
|
||||
}
|
||||
|
||||
s, ok := _levelToBracket[level]
|
||||
if ok {
|
||||
if l.headerColor != ColorOff {
|
||||
color := _levelToColor[level]
|
||||
color.Fprint(l.writer, s)
|
||||
} else {
|
||||
l.writer.WriteString(s)
|
||||
}
|
||||
} else {
|
||||
l.writer.WriteString("[?????]")
|
||||
}
|
||||
|
||||
if l.callerOffset > 0 {
|
||||
if _, file, line, ok := runtime.Caller(l.callerOffset); ok {
|
||||
l.writer.WriteByte(' ')
|
||||
l.writer.WriteString(trimCallerPath(file))
|
||||
l.writer.WriteByte(':')
|
||||
l.writer.WriteString(strconv.Itoa(line))
|
||||
l.writer.WriteByte(':')
|
||||
}
|
||||
}
|
||||
|
||||
l.writer.WriteByte(' ')
|
||||
|
||||
if name != "" {
|
||||
l.writer.WriteString(name)
|
||||
if msg != "" {
|
||||
l.writer.WriteString(": ")
|
||||
l.writer.WriteString(msg)
|
||||
}
|
||||
} else if msg != "" {
|
||||
l.writer.WriteString(msg)
|
||||
}
|
||||
|
||||
args = append(l.implied, args...)
|
||||
|
||||
var stacktrace CapturedStacktrace
|
||||
|
||||
if args != nil && len(args) > 0 {
|
||||
if len(args)%2 != 0 {
|
||||
cs, ok := args[len(args)-1].(CapturedStacktrace)
|
||||
if ok {
|
||||
args = args[:len(args)-1]
|
||||
stacktrace = cs
|
||||
} else {
|
||||
extra := args[len(args)-1]
|
||||
args = append(args[:len(args)-1], MissingKey, extra)
|
||||
}
|
||||
}
|
||||
|
||||
l.writer.WriteByte(':')
|
||||
|
||||
FOR:
|
||||
for i := 0; i < len(args); i = i + 2 {
|
||||
var (
|
||||
val string
|
||||
raw bool
|
||||
)
|
||||
|
||||
switch st := args[i+1].(type) {
|
||||
case string:
|
||||
val = st
|
||||
if st == "" {
|
||||
val = `""`
|
||||
raw = true
|
||||
}
|
||||
case int:
|
||||
val = strconv.FormatInt(int64(st), 10)
|
||||
case int64:
|
||||
val = strconv.FormatInt(int64(st), 10)
|
||||
case int32:
|
||||
val = strconv.FormatInt(int64(st), 10)
|
||||
case int16:
|
||||
val = strconv.FormatInt(int64(st), 10)
|
||||
case int8:
|
||||
val = strconv.FormatInt(int64(st), 10)
|
||||
case uint:
|
||||
val = strconv.FormatUint(uint64(st), 10)
|
||||
case uint64:
|
||||
val = strconv.FormatUint(uint64(st), 10)
|
||||
case uint32:
|
||||
val = strconv.FormatUint(uint64(st), 10)
|
||||
case uint16:
|
||||
val = strconv.FormatUint(uint64(st), 10)
|
||||
case uint8:
|
||||
val = strconv.FormatUint(uint64(st), 10)
|
||||
case Hex:
|
||||
val = "0x" + strconv.FormatUint(uint64(st), 16)
|
||||
case Octal:
|
||||
val = "0" + strconv.FormatUint(uint64(st), 8)
|
||||
case Binary:
|
||||
val = "0b" + strconv.FormatUint(uint64(st), 2)
|
||||
case CapturedStacktrace:
|
||||
stacktrace = st
|
||||
continue FOR
|
||||
case Format:
|
||||
val = fmt.Sprintf(st[0].(string), st[1:]...)
|
||||
case Quote:
|
||||
raw = true
|
||||
val = strconv.Quote(string(st))
|
||||
default:
|
||||
v := reflect.ValueOf(st)
|
||||
if v.Kind() == reflect.Slice {
|
||||
val = l.renderSlice(v)
|
||||
raw = true
|
||||
} else {
|
||||
val = fmt.Sprintf("%v", st)
|
||||
}
|
||||
}
|
||||
|
||||
var key string
|
||||
|
||||
switch st := args[i].(type) {
|
||||
case string:
|
||||
key = st
|
||||
default:
|
||||
key = fmt.Sprintf("%s", st)
|
||||
}
|
||||
|
||||
if strings.Contains(val, "\n") {
|
||||
l.writer.WriteString("\n ")
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteString("=\n")
|
||||
writeIndent(l.writer, val, " | ")
|
||||
l.writer.WriteString(" ")
|
||||
} else if !raw && needsQuoting(val) {
|
||||
l.writer.WriteByte(' ')
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteByte('=')
|
||||
l.writer.WriteString(strconv.Quote(val))
|
||||
} else {
|
||||
l.writer.WriteByte(' ')
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteByte('=')
|
||||
l.writer.WriteString(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
l.writer.WriteString("\n")
|
||||
|
||||
if stacktrace != "" {
|
||||
l.writer.WriteString(string(stacktrace))
|
||||
l.writer.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func writeIndent(w *writer, str string, indent string) {
|
||||
for {
|
||||
nl := strings.IndexByte(str, "\n"[0])
|
||||
if nl == -1 {
|
||||
if str != "" {
|
||||
w.WriteString(indent)
|
||||
w.WriteString(str)
|
||||
w.WriteString("\n")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteString(indent)
|
||||
w.WriteString(str[:nl])
|
||||
w.WriteString("\n")
|
||||
str = str[nl+1:]
|
||||
}
|
||||
}
|
||||
|
||||
func (l *intLogger) renderSlice(v reflect.Value) string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
buf.WriteRune('[')
|
||||
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
|
||||
sv := v.Index(i)
|
||||
|
||||
var val string
|
||||
|
||||
switch sv.Kind() {
|
||||
case reflect.String:
|
||||
val = strconv.Quote(sv.String())
|
||||
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
val = strconv.FormatInt(sv.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
val = strconv.FormatUint(sv.Uint(), 10)
|
||||
default:
|
||||
val = fmt.Sprintf("%v", sv.Interface())
|
||||
if strings.ContainsAny(val, " \t\n\r") {
|
||||
val = strconv.Quote(val)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(val)
|
||||
}
|
||||
|
||||
buf.WriteRune(']')
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// JSON logging function
|
||||
func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
|
||||
vals := l.jsonMapEntry(t, name, level, msg)
|
||||
args = append(l.implied, args...)
|
||||
|
||||
if args != nil && len(args) > 0 {
|
||||
if len(args)%2 != 0 {
|
||||
cs, ok := args[len(args)-1].(CapturedStacktrace)
|
||||
if ok {
|
||||
args = args[:len(args)-1]
|
||||
vals["stacktrace"] = cs
|
||||
} else {
|
||||
extra := args[len(args)-1]
|
||||
args = append(args[:len(args)-1], MissingKey, extra)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(args); i = i + 2 {
|
||||
val := args[i+1]
|
||||
switch sv := val.(type) {
|
||||
case error:
|
||||
// Check if val is of type error. If error type doesn't
|
||||
// implement json.Marshaler or encoding.TextMarshaler
|
||||
// then set val to err.Error() so that it gets marshaled
|
||||
switch sv.(type) {
|
||||
case json.Marshaler, encoding.TextMarshaler:
|
||||
default:
|
||||
val = sv.Error()
|
||||
}
|
||||
case Format:
|
||||
val = fmt.Sprintf(sv[0].(string), sv[1:]...)
|
||||
}
|
||||
|
||||
var key string
|
||||
|
||||
switch st := args[i].(type) {
|
||||
case string:
|
||||
key = st
|
||||
default:
|
||||
key = fmt.Sprintf("%s", st)
|
||||
}
|
||||
vals[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
err := json.NewEncoder(l.writer).Encode(vals)
|
||||
if err != nil {
|
||||
if _, ok := err.(*json.UnsupportedTypeError); ok {
|
||||
plainVal := l.jsonMapEntry(t, name, level, msg)
|
||||
plainVal["@warn"] = errJsonUnsupportedTypeMsg
|
||||
|
||||
json.NewEncoder(l.writer).Encode(plainVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
|
||||
vals := map[string]interface{}{
|
||||
"@message": msg,
|
||||
}
|
||||
if !l.disableTime {
|
||||
vals["@timestamp"] = t.Format(l.timeFormat)
|
||||
}
|
||||
|
||||
var levelStr string
|
||||
switch level {
|
||||
case Error:
|
||||
levelStr = "error"
|
||||
case Warn:
|
||||
levelStr = "warn"
|
||||
case Info:
|
||||
levelStr = "info"
|
||||
case Debug:
|
||||
levelStr = "debug"
|
||||
case Trace:
|
||||
levelStr = "trace"
|
||||
default:
|
||||
levelStr = "all"
|
||||
}
|
||||
|
||||
vals["@level"] = levelStr
|
||||
|
||||
if name != "" {
|
||||
vals["@module"] = name
|
||||
}
|
||||
|
||||
if l.callerOffset > 0 {
|
||||
if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok {
|
||||
vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
// Emit the message and args at the provided level
|
||||
func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
|
||||
l.log(l.Name(), level, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at DEBUG level
|
||||
func (l *intLogger) Debug(msg string, args ...interface{}) {
|
||||
l.log(l.Name(), Debug, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at TRACE level
|
||||
func (l *intLogger) Trace(msg string, args ...interface{}) {
|
||||
l.log(l.Name(), Trace, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at INFO level
|
||||
func (l *intLogger) Info(msg string, args ...interface{}) {
|
||||
l.log(l.Name(), Info, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at WARN level
|
||||
func (l *intLogger) Warn(msg string, args ...interface{}) {
|
||||
l.log(l.Name(), Warn, msg, args...)
|
||||
}
|
||||
|
||||
// Emit the message and args at ERROR level
|
||||
func (l *intLogger) Error(msg string, args ...interface{}) {
|
||||
l.log(l.Name(), Error, msg, args...)
|
||||
}
|
||||
|
||||
// Indicate that the logger would emit TRACE level logs
|
||||
func (l *intLogger) IsTrace() bool {
|
||||
return Level(atomic.LoadInt32(l.level)) == Trace
|
||||
}
|
||||
|
||||
// Indicate that the logger would emit DEBUG level logs
|
||||
func (l *intLogger) IsDebug() bool {
|
||||
return Level(atomic.LoadInt32(l.level)) <= Debug
|
||||
}
|
||||
|
||||
// Indicate that the logger would emit INFO level logs
|
||||
func (l *intLogger) IsInfo() bool {
|
||||
return Level(atomic.LoadInt32(l.level)) <= Info
|
||||
}
|
||||
|
||||
// Indicate that the logger would emit WARN level logs
|
||||
func (l *intLogger) IsWarn() bool {
|
||||
return Level(atomic.LoadInt32(l.level)) <= Warn
|
||||
}
|
||||
|
||||
// Indicate that the logger would emit ERROR level logs
|
||||
func (l *intLogger) IsError() bool {
|
||||
return Level(atomic.LoadInt32(l.level)) <= Error
|
||||
}
|
||||
|
||||
const MissingKey = "EXTRA_VALUE_AT_END"
|
||||
|
||||
// Return a sub-Logger for which every emitted log message will contain
|
||||
// the given key/value pairs. This is used to create a context specific
|
||||
// Logger.
|
||||
func (l *intLogger) With(args ...interface{}) Logger {
|
||||
var extra interface{}
|
||||
|
||||
if len(args)%2 != 0 {
|
||||
extra = args[len(args)-1]
|
||||
args = args[:len(args)-1]
|
||||
}
|
||||
|
||||
sl := l.copy()
|
||||
|
||||
result := make(map[string]interface{}, len(l.implied)+len(args))
|
||||
keys := make([]string, 0, len(l.implied)+len(args))
|
||||
|
||||
// Read existing args, store map and key for consistent sorting
|
||||
for i := 0; i < len(l.implied); i += 2 {
|
||||
key := l.implied[i].(string)
|
||||
keys = append(keys, key)
|
||||
result[key] = l.implied[i+1]
|
||||
}
|
||||
// Read new args, store map and key for consistent sorting
|
||||
for i := 0; i < len(args); i += 2 {
|
||||
key := args[i].(string)
|
||||
_, exists := result[key]
|
||||
if !exists {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
result[key] = args[i+1]
|
||||
}
|
||||
|
||||
// Sort keys to be consistent
|
||||
sort.Strings(keys)
|
||||
|
||||
sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
|
||||
for _, k := range keys {
|
||||
sl.implied = append(sl.implied, k)
|
||||
sl.implied = append(sl.implied, result[k])
|
||||
}
|
||||
|
||||
if extra != nil {
|
||||
sl.implied = append(sl.implied, MissingKey, extra)
|
||||
}
|
||||
|
||||
return sl
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with a name descending from the current name.
|
||||
// This is used to create a subsystem specific Logger.
|
||||
func (l *intLogger) Named(name string) Logger {
|
||||
sl := l.copy()
|
||||
|
||||
if sl.name != "" {
|
||||
sl.name = sl.name + "." + name
|
||||
} else {
|
||||
sl.name = name
|
||||
}
|
||||
|
||||
return sl
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with an explicit name. This ignores the current
|
||||
// name. This is used to create a standalone logger that doesn't fall
|
||||
// within the normal hierarchy.
|
||||
func (l *intLogger) ResetNamed(name string) Logger {
|
||||
sl := l.copy()
|
||||
|
||||
sl.name = name
|
||||
|
||||
return sl
|
||||
}
|
||||
|
||||
func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
|
||||
if opts.Output == nil {
|
||||
return errors.New("given output is nil")
|
||||
}
|
||||
|
||||
l.mutex.Lock()
|
||||
defer l.mutex.Unlock()
|
||||
|
||||
return l.resetOutput(opts)
|
||||
}
|
||||
|
||||
func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
|
||||
if opts.Output == nil {
|
||||
return errors.New("given output is nil")
|
||||
}
|
||||
if flushable == nil {
|
||||
return errors.New("flushable is nil")
|
||||
}
|
||||
|
||||
l.mutex.Lock()
|
||||
defer l.mutex.Unlock()
|
||||
|
||||
if err := flushable.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return l.resetOutput(opts)
|
||||
}
|
||||
|
||||
func (l *intLogger) resetOutput(opts *LoggerOptions) error {
|
||||
l.writer = newWriter(opts.Output, opts.Color)
|
||||
l.setColorization(opts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the logging level on-the-fly. This will affect all subloggers as
|
||||
// well.
|
||||
func (l *intLogger) SetLevel(level Level) {
|
||||
atomic.StoreInt32(l.level, int32(level))
|
||||
}
|
||||
|
||||
// Create a *log.Logger that will send its data through this Logger. This
|
||||
// allows packages that expect to be using the standard library log to actually
|
||||
// use this logger.
|
||||
func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
|
||||
if opts == nil {
|
||||
opts = &StandardLoggerOptions{}
|
||||
}
|
||||
|
||||
return log.New(l.StandardWriter(opts), "", 0)
|
||||
}
|
||||
|
||||
func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
|
||||
newLog := *l
|
||||
if l.callerOffset > 0 {
|
||||
// the stack is
|
||||
// logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch()
|
||||
// So plus 4.
|
||||
newLog.callerOffset = l.callerOffset + 4
|
||||
}
|
||||
return &stdlogAdapter{
|
||||
log: &newLog,
|
||||
inferLevels: opts.InferLevels,
|
||||
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
|
||||
forceLevel: opts.ForceLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// checks if the underlying io.Writer is a file, and
|
||||
// panics if not. For use by colorization.
|
||||
func (l *intLogger) checkWriterIsFile() *os.File {
|
||||
fi, ok := l.writer.w.(*os.File)
|
||||
if !ok {
|
||||
panic("Cannot enable coloring of non-file Writers")
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
// Accept implements the SinkAdapter interface
|
||||
func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
|
||||
i.log(name, level, msg, args...)
|
||||
}
|
||||
|
||||
// ImpliedArgs returns the loggers implied args
|
||||
func (i *intLogger) ImpliedArgs() []interface{} {
|
||||
return i.implied
|
||||
}
|
||||
|
||||
// Name returns the loggers name
|
||||
func (i *intLogger) Name() string {
|
||||
return i.name
|
||||
}
|
||||
|
||||
// copy returns a shallow copy of the intLogger, replacing the level pointer
|
||||
// when necessary
|
||||
func (l *intLogger) copy() *intLogger {
|
||||
sl := *l
|
||||
|
||||
if l.independentLevels {
|
||||
sl.level = new(int32)
|
||||
*sl.level = *l.level
|
||||
}
|
||||
|
||||
return &sl
|
||||
}
|
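A brief, hedged sketch of the JSON output path implemented above (`JSONFormat: true`); the example program and field values are illustrative, and the keys come from `jsonMapEntry`:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// JSONFormat switches the internal logger from the plain-text layout to
	// line-delimited JSON with @timestamp, @level, @module and @message keys.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "my-app",
		Output:     os.Stdout,
		JSONFormat: true,
	})

	logger.Info("request finished", "status", 200, "path", "/healthz")
	// => {"@level":"info","@message":"request finished","@module":"my-app",...,"path":"/healthz","status":200}
}
```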
vendor/github.com/hashicorp/go-hclog/logger.go (generated, vendored): 369 lines deleted
@@ -1,369 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultOutput is used as the default log output.
|
||||
DefaultOutput io.Writer = os.Stderr
|
||||
|
||||
// DefaultLevel is used as the default log level.
|
||||
DefaultLevel = Info
|
||||
)
|
||||
|
||||
// Level represents a log level.
|
||||
type Level int32
|
||||
|
||||
const (
|
||||
// NoLevel is a special level used to indicate that no level has been
|
||||
// set and allow for a default to be used.
|
||||
NoLevel Level = 0
|
||||
|
||||
// Trace is the most verbose level. Intended to be used for the tracing
|
||||
// of actions in code, such as function enters/exits, etc.
|
||||
Trace Level = 1
|
||||
|
||||
// Debug information for programmer low-level analysis.
|
||||
Debug Level = 2
|
||||
|
||||
// Info information about steady state operations.
|
||||
Info Level = 3
|
||||
|
||||
// Warn information about rare but handled events.
|
||||
Warn Level = 4
|
||||
|
||||
// Error information about unrecoverable events.
|
||||
Error Level = 5
|
||||
|
||||
// Off disables all logging output.
|
||||
Off Level = 6
|
||||
)
|
||||
|
||||
// Format is a simple convenience type for when formatting is required. When
|
||||
// processing a value of this type, the logger automatically treats the first
|
||||
// argument as a Printf formatting string and passes the rest as the values
|
||||
// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
|
||||
type Format []interface{}
|
||||
|
||||
// Fmt returns a Format type. This is a convenience function for creating a Format
|
||||
// type.
|
||||
func Fmt(str string, args ...interface{}) Format {
|
||||
return append(Format{str}, args...)
|
||||
}
|
||||
|
||||
// A simple shortcut to format numbers in hex when displayed with the normal
|
||||
// text output. For example: L.Info("header value", Hex(17))
|
||||
type Hex int
|
||||
|
||||
// A simple shortcut to format numbers in octal when displayed with the normal
|
||||
// text output. For example: L.Info("perms", Octal(17))
|
||||
type Octal int
|
||||
|
||||
// A simple shortcut to format numbers in binary when displayed with the normal
|
||||
// text output. For example: L.Info("bits", Binary(17))
|
||||
type Binary int
|
||||
|
||||
// A simple shortcut to format strings with Go quoting. Control and
|
||||
// non-printable characters will be escaped with their backslash equivalents in
|
||||
// output. Intended for untrusted or multiline strings which should be logged
|
||||
// as concisely as possible.
|
||||
type Quote string
|
||||
|
||||
// ColorOption expresses how the output should be colored, if at all.
|
||||
type ColorOption uint8
|
||||
|
||||
const (
|
||||
// ColorOff is the default coloration, and does not
|
||||
// inject color codes into the io.Writer.
|
||||
ColorOff ColorOption = iota
|
||||
// AutoColor checks if the io.Writer is a tty,
|
||||
// and if so enables coloring.
|
||||
AutoColor
|
||||
// ForceColor will enable coloring, regardless of whether
|
||||
// the io.Writer is a tty or not.
|
||||
ForceColor
|
||||
)
|
||||
|
||||
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
|
||||
// the level string is invalid. This facilitates setting the log level via
|
||||
// config or environment variable by name in a predictable way.
|
||||
func LevelFromString(levelStr string) Level {
|
||||
// We don't care about case. Accept both "INFO" and "info".
|
||||
levelStr = strings.ToLower(strings.TrimSpace(levelStr))
|
||||
switch levelStr {
|
||||
case "trace":
|
||||
return Trace
|
||||
case "debug":
|
||||
return Debug
|
||||
case "info":
|
||||
return Info
|
||||
case "warn":
|
||||
return Warn
|
||||
case "error":
|
||||
return Error
|
||||
case "off":
|
||||
return Off
|
||||
default:
|
||||
return NoLevel
|
||||
}
|
||||
}
|
||||
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case Trace:
|
||||
return "trace"
|
||||
case Debug:
|
||||
return "debug"
|
||||
case Info:
|
||||
return "info"
|
||||
case Warn:
|
||||
return "warn"
|
||||
case Error:
|
||||
return "error"
|
||||
case NoLevel:
|
||||
return "none"
|
||||
case Off:
|
||||
return "off"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Logger describes the interface that must be implemented by all loggers.
|
||||
type Logger interface {
|
||||
// Args are alternating key, val pairs
|
||||
// keys must be strings
|
||||
// vals can be any type, but display is implementation specific
|
||||
// Emit a message and key/value pairs at a provided log level
|
||||
Log(level Level, msg string, args ...interface{})
|
||||
|
||||
// Emit a message and key/value pairs at the TRACE level
|
||||
Trace(msg string, args ...interface{})
|
||||
|
||||
// Emit a message and key/value pairs at the DEBUG level
|
||||
Debug(msg string, args ...interface{})
|
||||
|
||||
// Emit a message and key/value pairs at the INFO level
|
||||
Info(msg string, args ...interface{})
|
||||
|
||||
// Emit a message and key/value pairs at the WARN level
|
||||
Warn(msg string, args ...interface{})
|
||||
|
||||
// Emit a message and key/value pairs at the ERROR level
|
||||
Error(msg string, args ...interface{})
|
||||
|
||||
// Indicate if TRACE logs would be emitted. This and the other Is* guards
|
||||
// are used to elide expensive logging code based on the current level.
|
||||
IsTrace() bool
|
||||
|
||||
// Indicate if DEBUG logs would be emitted. This and the other Is* guards
|
||||
IsDebug() bool
|
||||
|
||||
// Indicate if INFO logs would be emitted. This and the other Is* guards
|
||||
IsInfo() bool
|
||||
|
||||
// Indicate if WARN logs would be emitted. This and the other Is* guards
|
||||
IsWarn() bool
|
||||
|
||||
// Indicate if ERROR logs would be emitted. This and the other Is* guards
|
||||
IsError() bool
|
||||
|
||||
// ImpliedArgs returns With key/value pairs
|
||||
ImpliedArgs() []interface{}
|
||||
|
||||
// Creates a sublogger that will always have the given key/value pairs
|
||||
With(args ...interface{}) Logger
|
||||
|
||||
// Returns the Name of the logger
|
||||
Name() string
|
||||
|
||||
// Create a logger that will prepend the name string on the front of all messages.
|
||||
// If the logger already has a name, the new value will be appended to the current
|
||||
// name. That way, a major subsystem can use this to decorate all its own logs
|
||||
// without losing context.
|
||||
Named(name string) Logger
|
||||
|
||||
// Create a logger that will prepend the name string on the front of all messages.
|
||||
// This sets the name of the logger to the value directly, unlike Named which honors
|
||||
// the current name as well.
|
||||
ResetNamed(name string) Logger
|
||||
|
||||
// Updates the level. This should affect all related loggers as well,
|
||||
// unless they were created with IndependentLevels. If an
|
||||
// implementation cannot update the level on the fly, it should no-op.
|
||||
SetLevel(level Level)
|
||||
|
||||
// Return a value that conforms to the stdlib log.Logger interface
|
||||
StandardLogger(opts *StandardLoggerOptions) *log.Logger
|
||||
|
||||
// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
|
||||
StandardWriter(opts *StandardLoggerOptions) io.Writer
|
||||
}
|
||||
|
||||
// StandardLoggerOptions can be used to configure a new standard logger.
|
||||
type StandardLoggerOptions struct {
|
||||
// Indicate that some minimal parsing should be done on strings to try
|
||||
// and detect their level and re-emit them.
|
||||
// This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO],
|
||||
// [DEBUG] and strip it off before reapplying it.
|
||||
InferLevels bool
|
||||
|
||||
// Indicate that some minimal parsing should be done on strings to try
|
||||
// and detect their level and re-emit them while ignoring possible
|
||||
// timestamp values in the beginning of the string.
|
||||
// This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO],
|
||||
// [DEBUG] and strip it off before reapplying it.
|
||||
// The timestamp detection may result in false positives and incomplete
|
||||
// string outputs.
|
||||
InferLevelsWithTimestamp bool
|
||||
|
||||
// ForceLevel is used to force all output from the standard logger to be at
|
||||
// the specified level. Similar to InferLevels, this will strip any level
|
||||
// prefix contained in the logged string before applying the forced level.
|
||||
// If set, this overrides InferLevels.
|
||||
ForceLevel Level
|
||||
}
|
||||
|
||||
type TimeFunction = func() time.Time
|
||||
|
||||
// LoggerOptions can be used to configure a new logger.
|
||||
type LoggerOptions struct {
|
||||
// Name of the subsystem to prefix logs with
|
||||
Name string
|
||||
|
||||
// The threshold for the logger. Anything less severe is suppressed
|
||||
Level Level
|
||||
|
||||
// Where to write the logs to. Defaults to os.Stderr if nil
|
||||
Output io.Writer
|
||||
|
||||
// An optional Locker in case Output is shared. This can be a sync.Mutex or
|
||||
// a NoopLocker if the caller wants control over output, e.g. for batching
|
||||
// log lines.
|
||||
Mutex Locker
|
||||
|
||||
// Control if the output should be in JSON.
|
||||
JSONFormat bool
|
||||
|
||||
// Include file and line information in each log line
|
||||
IncludeLocation bool
|
||||
|
||||
// AdditionalLocationOffset is the number of additional stack levels to skip
|
||||
// when finding the file and line information for the log line
|
||||
AdditionalLocationOffset int
|
||||
|
||||
// The time format to use instead of the default
|
||||
TimeFormat string
|
||||
|
||||
// A function which is called to get the time object that is formatted using `TimeFormat`
|
||||
TimeFn TimeFunction
|
||||
|
||||
// Control whether or not to display the time at all. This is required
|
||||
// because setting TimeFormat to empty assumes the default format.
|
||||
DisableTime bool
|
||||
|
||||
// Color the output. On Windows, colored logs are only available for io.Writers that
|
||||
// are concretely instances of *os.File.
|
||||
Color ColorOption
|
||||
|
||||
// Only color the header, not the body. This can help with readability of long messages.
|
||||
ColorHeaderOnly bool
|
||||
|
||||
// A function which is called with the log information and if it returns true the value
|
||||
// should not be logged.
|
||||
// This is useful when interacting with a system that you wish to suppress the log
|
||||
// message for (because it's too noisy, etc)
|
||||
Exclude func(level Level, msg string, args ...interface{}) bool
|
||||
|
||||
// IndependentLevels causes subloggers to be created with an independent
|
||||
// copy of this logger's level. This means that using SetLevel on this
|
||||
// logger will not affect any subloggers, and SetLevel on any subloggers
|
||||
// will not affect the parent or sibling loggers.
|
||||
IndependentLevels bool
|
||||
}
|
||||
|
||||
// InterceptLogger describes the interface for using a logger
|
||||
// that can register different output sinks.
|
||||
// This is useful for sending lower level log messages
|
||||
// to a different output while keeping the root logger
|
||||
// at a higher one.
|
||||
type InterceptLogger interface {
|
||||
// Logger is the root logger for an InterceptLogger
|
||||
Logger
|
||||
|
||||
// RegisterSink adds a SinkAdapter to the InterceptLogger
|
||||
RegisterSink(sink SinkAdapter)
|
||||
|
||||
// DeregisterSink removes a SinkAdapter from the InterceptLogger
|
||||
DeregisterSink(sink SinkAdapter)
|
||||
|
||||
// Create a interceptlogger that will prepend the name string on the front of all messages.
|
||||
// If the logger already has a name, the new value will be appended to the current
|
||||
// name. That way, a major subsystem can use this to decorate all it's own logs
|
||||
// without losing context.
|
||||
NamedIntercept(name string) InterceptLogger
|
||||
|
||||
// Create a interceptlogger that will prepend the name string on the front of all messages.
|
||||
// This sets the name of the logger to the value directly, unlike Named which honor
|
||||
// the current name as well.
|
||||
ResetNamedIntercept(name string) InterceptLogger
|
||||
|
||||
// Deprecated: use StandardLogger
|
||||
StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
|
||||
|
||||
// Deprecated: use StandardWriter
|
||||
StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
|
||||
}
|
||||
|
||||
// SinkAdapter describes the interface that must be implemented
|
||||
// in order to Register a new sink to an InterceptLogger
|
||||
type SinkAdapter interface {
|
||||
Accept(name string, level Level, msg string, args ...interface{})
|
||||
}
|
||||
|
||||
// Flushable represents a method for flushing an output buffer. It can be used
|
||||
// if Resetting the log to use a new output, in order to flush the writes to
|
||||
// the existing output beforehand.
|
||||
type Flushable interface {
|
||||
Flush() error
|
||||
}
|
||||
|
||||
// OutputResettable provides ways to swap the output in use at runtime
|
||||
type OutputResettable interface {
|
||||
// ResetOutput swaps the current output writer with the one given in the
|
||||
// opts. Color options given in opts will be used for the new output.
|
||||
ResetOutput(opts *LoggerOptions) error
|
||||
|
||||
// ResetOutputWithFlush swaps the current output writer with the one given
|
||||
// in the opts, first calling Flush on the given Flushable. Color options
|
||||
// given in opts will be used for the new output.
|
||||
ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
|
||||
}
|
||||
|
||||
// Locker is used for locking output. If not set when creating a logger, a
|
||||
// sync.Mutex will be used internally.
|
||||
type Locker interface {
|
||||
// Lock is called when the output is going to be changed or written to
|
||||
Lock()
|
||||
|
||||
// Unlock is called when the operation that called Lock() completes
|
||||
Unlock()
|
||||
}
|
||||
|
||||
// NoopLocker implements locker but does nothing. This is useful if the client
|
||||
// wants tight control over locking, in order to provide grouping of log
|
||||
// entries or other functionality.
|
||||
type NoopLocker struct{}
|
||||
|
||||
// Lock does nothing
|
||||
func (n NoopLocker) Lock() {}
|
||||
|
||||
// Unlock does nothing
|
||||
func (n NoopLocker) Unlock() {}
|
||||
|
||||
var _ Locker = (*NoopLocker)(nil)
|
58
vendor/github.com/hashicorp/go-hclog/nulllogger.go
generated
vendored
58
vendor/github.com/hashicorp/go-hclog/nulllogger.go
generated
vendored
@ -1,58 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
// NewNullLogger instantiates a Logger for which all calls
|
||||
// will succeed without doing anything.
|
||||
// Useful for testing purposes.
|
||||
func NewNullLogger() Logger {
|
||||
return &nullLogger{}
|
||||
}
|
||||
|
||||
type nullLogger struct{}
|
||||
|
||||
func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) Trace(msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) Debug(msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) Info(msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) Warn(msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) Error(msg string, args ...interface{}) {}
|
||||
|
||||
func (l *nullLogger) IsTrace() bool { return false }
|
||||
|
||||
func (l *nullLogger) IsDebug() bool { return false }
|
||||
|
||||
func (l *nullLogger) IsInfo() bool { return false }
|
||||
|
||||
func (l *nullLogger) IsWarn() bool { return false }
|
||||
|
||||
func (l *nullLogger) IsError() bool { return false }
|
||||
|
||||
func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
|
||||
|
||||
func (l *nullLogger) With(args ...interface{}) Logger { return l }
|
||||
|
||||
func (l *nullLogger) Name() string { return "" }
|
||||
|
||||
func (l *nullLogger) Named(name string) Logger { return l }
|
||||
|
||||
func (l *nullLogger) ResetNamed(name string) Logger { return l }
|
||||
|
||||
func (l *nullLogger) SetLevel(level Level) {}
|
||||
|
||||
func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
|
||||
return log.New(l.StandardWriter(opts), "", log.LstdFlags)
|
||||
}
|
||||
|
||||
func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
|
||||
return ioutil.Discard
|
||||
}
|
109
vendor/github.com/hashicorp/go-hclog/stacktrace.go
generated
vendored
109
vendor/github.com/hashicorp/go-hclog/stacktrace.go
generated
vendored
@ -1,109 +0,0 @@
|
||||
// Copyright (c) 2016 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
_stacktraceIgnorePrefixes = []string{
|
||||
"runtime.goexit",
|
||||
"runtime.main",
|
||||
}
|
||||
_stacktracePool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return newProgramCounters(64)
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// CapturedStacktrace represents a stacktrace captured by a previous call
|
||||
// to log.Stacktrace. If passed to a logging function, the stacktrace
|
||||
// will be appended.
|
||||
type CapturedStacktrace string
|
||||
|
||||
// Stacktrace captures a stacktrace of the current goroutine and returns
|
||||
// it to be passed to a logging function.
|
||||
func Stacktrace() CapturedStacktrace {
|
||||
return CapturedStacktrace(takeStacktrace())
|
||||
}
|
||||
|
||||
func takeStacktrace() string {
|
||||
programCounters := _stacktracePool.Get().(*programCounters)
|
||||
defer _stacktracePool.Put(programCounters)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
|
||||
for {
|
||||
// Skip the call to runtime.Counters and takeStacktrace so that the
|
||||
// program counters start at the caller of takeStacktrace.
|
||||
n := runtime.Callers(2, programCounters.pcs)
|
||||
if n < cap(programCounters.pcs) {
|
||||
programCounters.pcs = programCounters.pcs[:n]
|
||||
break
|
||||
}
|
||||
// Don't put the too-short counter slice back into the pool; this lets
|
||||
// the pool adjust if we consistently take deep stacktraces.
|
||||
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
|
||||
}
|
||||
|
||||
i := 0
|
||||
frames := runtime.CallersFrames(programCounters.pcs)
|
||||
for frame, more := frames.Next(); more; frame, more = frames.Next() {
|
||||
if shouldIgnoreStacktraceFunction(frame.Function) {
|
||||
continue
|
||||
}
|
||||
if i != 0 {
|
||||
buffer.WriteByte('\n')
|
||||
}
|
||||
i++
|
||||
buffer.WriteString(frame.Function)
|
||||
buffer.WriteByte('\n')
|
||||
buffer.WriteByte('\t')
|
||||
buffer.WriteString(frame.File)
|
||||
buffer.WriteByte(':')
|
||||
buffer.WriteString(strconv.Itoa(int(frame.Line)))
|
||||
}
|
||||
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func shouldIgnoreStacktraceFunction(function string) bool {
|
||||
for _, prefix := range _stacktraceIgnorePrefixes {
|
||||
if strings.HasPrefix(function, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type programCounters struct {
|
||||
pcs []uintptr
|
||||
}
|
||||
|
||||
func newProgramCounters(size int) *programCounters {
|
||||
return &programCounters{make([]uintptr, size)}
|
||||
}
|
110
vendor/github.com/hashicorp/go-hclog/stdlog.go
generated
vendored
110
vendor/github.com/hashicorp/go-hclog/stdlog.go
generated
vendored
@ -1,110 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Regex to ignore characters commonly found in timestamp formats from the
|
||||
// beginning of inputs.
|
||||
var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
|
||||
|
||||
// Provides a io.Writer to shim the data out of *log.Logger
|
||||
// and back into our Logger. This is basically the only way to
|
||||
// build upon *log.Logger.
|
||||
type stdlogAdapter struct {
|
||||
log Logger
|
||||
inferLevels bool
|
||||
inferLevelsWithTimestamp bool
|
||||
forceLevel Level
|
||||
}
|
||||
|
||||
// Take the data, infer the levels if configured, and send it through
|
||||
// a regular Logger.
|
||||
func (s *stdlogAdapter) Write(data []byte) (int, error) {
|
||||
str := string(bytes.TrimRight(data, " \t\n"))
|
||||
|
||||
if s.forceLevel != NoLevel {
|
||||
// Use pickLevel to strip log levels included in the line since we are
|
||||
// forcing the level
|
||||
_, str := s.pickLevel(str)
|
||||
|
||||
// Log at the forced level
|
||||
s.dispatch(str, s.forceLevel)
|
||||
} else if s.inferLevels {
|
||||
if s.inferLevelsWithTimestamp {
|
||||
str = s.trimTimestamp(str)
|
||||
}
|
||||
|
||||
level, str := s.pickLevel(str)
|
||||
s.dispatch(str, level)
|
||||
} else {
|
||||
s.log.Info(str)
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (s *stdlogAdapter) dispatch(str string, level Level) {
|
||||
switch level {
|
||||
case Trace:
|
||||
s.log.Trace(str)
|
||||
case Debug:
|
||||
s.log.Debug(str)
|
||||
case Info:
|
||||
s.log.Info(str)
|
||||
case Warn:
|
||||
s.log.Warn(str)
|
||||
case Error:
|
||||
s.log.Error(str)
|
||||
default:
|
||||
s.log.Info(str)
|
||||
}
|
||||
}
|
||||
|
||||
// Detect, based on conventions, what log level this is.
|
||||
func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
|
||||
switch {
|
||||
case strings.HasPrefix(str, "[DEBUG]"):
|
||||
return Debug, strings.TrimSpace(str[7:])
|
||||
case strings.HasPrefix(str, "[TRACE]"):
|
||||
return Trace, strings.TrimSpace(str[7:])
|
||||
case strings.HasPrefix(str, "[INFO]"):
|
||||
return Info, strings.TrimSpace(str[6:])
|
||||
case strings.HasPrefix(str, "[WARN]"):
|
||||
return Warn, strings.TrimSpace(str[6:])
|
||||
case strings.HasPrefix(str, "[ERROR]"):
|
||||
return Error, strings.TrimSpace(str[7:])
|
||||
case strings.HasPrefix(str, "[ERR]"):
|
||||
return Error, strings.TrimSpace(str[5:])
|
||||
default:
|
||||
return Info, str
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stdlogAdapter) trimTimestamp(str string) string {
|
||||
idx := logTimestampRegexp.FindStringIndex(str)
|
||||
return str[idx[1]:]
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
l *log.Logger
|
||||
}
|
||||
|
||||
func (l *logWriter) Write(b []byte) (int, error) {
|
||||
l.l.Println(string(bytes.TrimRight(b, " \n\t")))
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Takes a standard library logger and returns a Logger that will write to it
|
||||
func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger {
|
||||
var dl LoggerOptions = *opts
|
||||
|
||||
// Use the time format that log.Logger uses
|
||||
dl.DisableTime = true
|
||||
dl.Output = &logWriter{l}
|
||||
|
||||
return New(&dl)
|
||||
}
|
82
vendor/github.com/hashicorp/go-hclog/writer.go
generated
vendored
82
vendor/github.com/hashicorp/go-hclog/writer.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
package hclog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
type writer struct {
|
||||
b bytes.Buffer
|
||||
w io.Writer
|
||||
color ColorOption
|
||||
}
|
||||
|
||||
func newWriter(w io.Writer, color ColorOption) *writer {
|
||||
return &writer{w: w, color: color}
|
||||
}
|
||||
|
||||
func (w *writer) Flush(level Level) (err error) {
|
||||
var unwritten = w.b.Bytes()
|
||||
|
||||
if w.color != ColorOff {
|
||||
color := _levelToColor[level]
|
||||
unwritten = []byte(color.Sprintf("%s", unwritten))
|
||||
}
|
||||
|
||||
if lw, ok := w.w.(LevelWriter); ok {
|
||||
_, err = lw.LevelWrite(level, unwritten)
|
||||
} else {
|
||||
_, err = w.w.Write(unwritten)
|
||||
}
|
||||
w.b.Reset()
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *writer) Write(p []byte) (int, error) {
|
||||
return w.b.Write(p)
|
||||
}
|
||||
|
||||
func (w *writer) WriteByte(c byte) error {
|
||||
return w.b.WriteByte(c)
|
||||
}
|
||||
|
||||
func (w *writer) WriteString(s string) (int, error) {
|
||||
return w.b.WriteString(s)
|
||||
}
|
||||
|
||||
// LevelWriter is the interface that wraps the LevelWrite method.
|
||||
type LevelWriter interface {
|
||||
LevelWrite(level Level, p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
// LeveledWriter writes all log messages to the standard writer,
|
||||
// except for log levels that are defined in the overrides map.
|
||||
type LeveledWriter struct {
|
||||
standard io.Writer
|
||||
overrides map[Level]io.Writer
|
||||
}
|
||||
|
||||
// NewLeveledWriter returns an initialized LeveledWriter.
|
||||
//
|
||||
// standard will be used as the default writer for all log levels,
|
||||
// except for log levels that are defined in the overrides map.
|
||||
func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
|
||||
return &LeveledWriter{
|
||||
standard: standard,
|
||||
overrides: overrides,
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements io.Writer.
|
||||
func (lw *LeveledWriter) Write(p []byte) (int, error) {
|
||||
return lw.standard.Write(p)
|
||||
}
|
||||
|
||||
// LevelWrite implements LevelWriter.
|
||||
func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
|
||||
w, ok := lw.overrides[level]
|
||||
if !ok {
|
||||
w = lw.standard
|
||||
}
|
||||
return w.Write(p)
|
||||
}
|
24
vendor/github.com/hashicorp/go-immutable-radix/.gitignore
generated
vendored
24
vendor/github.com/hashicorp/go-immutable-radix/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
23
vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
generated
vendored
23
vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
generated
vendored
@ -1,23 +0,0 @@
|
||||
# UNRELEASED
|
||||
|
||||
# 1.3.0 (September 17th, 2020)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
|
||||
|
||||
# 1.2.0 (March 18th, 2020)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
|
||||
|
||||
# 1.1.0 (May 22nd, 2019)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
|
||||
|
||||
# 1.0.0 (August 30th, 2018)
|
||||
|
||||
* go mod adopted
|
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
@ -1,363 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
66
vendor/github.com/hashicorp/go-immutable-radix/README.md
generated
vendored
66
vendor/github.com/hashicorp/go-immutable-radix/README.md
generated
vendored
@ -1,66 +0,0 @@
|
||||
go-immutable-radix [](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
|
||||
=========
|
||||
|
||||
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
|
||||
The package only provides a single `Tree` implementation, optimized for sparse nodes.
|
||||
|
||||
As a radix tree, it provides the following:
|
||||
* O(k) operations. In many cases, this can be faster than a hash table since
|
||||
the hash function is an O(k) operation, and hash tables have very poor cache locality.
|
||||
* Minimum / Maximum value lookups
|
||||
* Ordered iteration
|
||||
|
||||
A tree supports using a transaction to batch multiple updates (insert, delete)
|
||||
in a more efficient manner than performing each operation one at a time.
|
||||
|
||||
For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Below is a simple example of usage
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := iradix.New()
|
||||
r, _, _ = r.Insert([]byte("foo"), 1)
|
||||
r, _, _ = r.Insert([]byte("bar"), 2)
|
||||
r, _, _ = r.Insert([]byte("foobar"), 2)
|
||||
|
||||
// Find the longest prefix match
|
||||
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
|
||||
if string(m) != "foo" {
|
||||
panic("should be foo")
|
||||
}
|
||||
```
|
||||
|
||||
Here is an example of performing a range scan of the keys.
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := iradix.New()
|
||||
r, _, _ = r.Insert([]byte("001"), 1)
|
||||
r, _, _ = r.Insert([]byte("002"), 2)
|
||||
r, _, _ = r.Insert([]byte("005"), 5)
|
||||
r, _, _ = r.Insert([]byte("010"), 10)
|
||||
r, _, _ = r.Insert([]byte("100"), 10)
|
||||
|
||||
// Range scan over the keys that sort lexicographically between [003, 050)
|
||||
it := r.Root().Iterator()
|
||||
it.SeekLowerBound([]byte("003"))
|
||||
for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
|
||||
if key >= "050" {
|
||||
break
|
||||
}
|
||||
fmt.Println(key)
|
||||
}
|
||||
// Output:
|
||||
// 005
|
||||
// 010
|
||||
```
|
||||
|
21
vendor/github.com/hashicorp/go-immutable-radix/edges.go
generated
vendored
21
vendor/github.com/hashicorp/go-immutable-radix/edges.go
generated
vendored
@ -1,21 +0,0 @@
|
||||
package iradix
|
||||
|
||||
import "sort"
|
||||
|
||||
type edges []edge
|
||||
|
||||
func (e edges) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e edges) Less(i, j int) bool {
|
||||
return e[i].label < e[j].label
|
||||
}
|
||||
|
||||
func (e edges) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e edges) Sort() {
|
||||
sort.Sort(e)
|
||||
}
|
676
vendor/github.com/hashicorp/go-immutable-radix/iradix.go
generated
vendored
676
vendor/github.com/hashicorp/go-immutable-radix/iradix.go
generated
vendored
@ -1,676 +0,0 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultModifiedCache is the default size of the modified node
|
||||
// cache used per transaction. This is used to cache the updates
|
||||
// to the nodes near the root, while the leaves do not need to be
|
||||
// cached. This is important for very large transactions to prevent
|
||||
// the modified cache from growing to be enormous. This is also used
|
||||
// to set the max size of the mutation notify maps since those should
|
||||
// also be bounded in a similar way.
|
||||
defaultModifiedCache = 8192
|
||||
)
|
||||
|
||||
// Tree implements an immutable radix tree. This can be treated as a
|
||||
// Dictionary abstract data type. The main advantage over a standard
|
||||
// hash map is prefix-based lookups and ordered iteration. The immutability
|
||||
// means that it is safe to concurrently read from a Tree without any
|
||||
// coordination.
|
||||
type Tree struct {
|
||||
root *Node
|
||||
size int
|
||||
}
|
||||
|
||||
// New returns an empty Tree
|
||||
func New() *Tree {
|
||||
t := &Tree{
|
||||
root: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
},
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Len is used to return the number of elements in the tree
|
||||
func (t *Tree) Len() int {
|
||||
return t.size
|
||||
}
|
||||
|
||||
// Txn is a transaction on the tree. This transaction is applied
|
||||
// atomically and returns a new tree when committed. A transaction
|
||||
// is not thread safe, and should only be used by a single goroutine.
|
||||
type Txn struct {
|
||||
// root is the modified root for the transaction.
|
||||
root *Node
|
||||
|
||||
// snap is a snapshot of the root node for use if we have to run the
|
||||
// slow notify algorithm.
|
||||
snap *Node
|
||||
|
||||
// size tracks the size of the tree as it is modified during the
|
||||
// transaction.
|
||||
size int
|
||||
|
||||
// writable is a cache of writable nodes that have been created during
|
||||
// the course of the transaction. This allows us to re-use the same
|
||||
// nodes for further writes and avoid unnecessary copies of nodes that
|
||||
// have never been exposed outside the transaction. This will only hold
|
||||
// up to defaultModifiedCache number of entries.
|
||||
writable *simplelru.LRU
|
||||
|
||||
// trackChannels is used to hold channels that need to be notified to
|
||||
// signal mutation of the tree. This will only hold up to
|
||||
// defaultModifiedCache number of entries, after which we will set the
|
||||
// trackOverflow flag, which will cause us to use a more expensive
|
||||
// algorithm to perform the notifications. Mutation tracking is only
|
||||
// performed if trackMutate is true.
|
||||
trackChannels map[chan struct{}]struct{}
|
||||
trackOverflow bool
|
||||
trackMutate bool
|
||||
}
|
||||
|
||||
// Txn starts a new transaction that can be used to mutate the tree
|
||||
func (t *Tree) Txn() *Txn {
|
||||
txn := &Txn{
|
||||
root: t.root,
|
||||
snap: t.root,
|
||||
size: t.size,
|
||||
}
|
||||
return txn
|
||||
}
|
||||
|
||||
// Clone makes an independent copy of the transaction. The new transaction
|
||||
// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread.
|
||||
func (t *Txn) Clone() *Txn {
|
||||
// reset the writable node cache to avoid leaking future writes into the clone
|
||||
t.writable = nil
|
||||
|
||||
txn := &Txn{
|
||||
root: t.root,
|
||||
snap: t.snap,
|
||||
size: t.size,
|
||||
}
|
||||
return txn
|
||||
}
|
||||
|
||||
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
|
||||
// then notifications will be issued for affected internal nodes and leaves when
|
||||
// the transaction is committed.
|
||||
func (t *Txn) TrackMutate(track bool) {
|
||||
t.trackMutate = track
|
||||
}
|
||||
|
||||
// trackChannel safely attempts to track the given mutation channel, setting the
|
||||
// overflow flag if we can no longer track any more. This limits the amount of
|
||||
// state that will accumulate during a transaction and we have a slower algorithm
|
||||
// to switch to if we overflow.
|
||||
func (t *Txn) trackChannel(ch chan struct{}) {
|
||||
// In overflow, make sure we don't store any more objects.
|
||||
if t.trackOverflow {
|
||||
return
|
||||
}
|
||||
|
||||
// If this would overflow the state we reject it and set the flag (since
|
||||
// we aren't tracking everything that's required any longer).
|
||||
if len(t.trackChannels) >= defaultModifiedCache {
|
||||
// Mark that we are in the overflow state
|
||||
t.trackOverflow = true
|
||||
|
||||
// Clear the map so that the channels can be garbage collected. It is
|
||||
// safe to do this since we have already overflowed and will be using
|
||||
// the slow notify algorithm.
|
||||
t.trackChannels = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Create the map on the fly when we need it.
|
||||
if t.trackChannels == nil {
|
||||
t.trackChannels = make(map[chan struct{}]struct{})
|
||||
}
|
||||
|
||||
// Otherwise we are good to track it.
|
||||
t.trackChannels[ch] = struct{}{}
|
||||
}
|
||||
|
||||
// writeNode returns a node to be modified, if the current node has already been
|
||||
// modified during the course of the transaction, it is used in-place. Set
|
||||
// forLeafUpdate to true if you are getting a write node to update the leaf,
|
||||
// which will set leaf mutation tracking appropriately as well.
|
||||
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
|
||||
// Ensure the writable set exists.
|
||||
if t.writable == nil {
|
||||
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t.writable = lru
|
||||
}
|
||||
|
||||
// If this node has already been modified, we can continue to use it
|
||||
// during this transaction. We know that we don't need to track it for
|
||||
// a node update since the node is writable, but if this is for a leaf
|
||||
// update we track it, in case the initial write to this node didn't
|
||||
// update the leaf.
|
||||
if _, ok := t.writable.Get(n); ok {
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Copy the existing node. If you have set forLeafUpdate it will be
|
||||
// safe to replace this leaf with another after you get your node for
|
||||
// writing. You MUST replace it, because the channel associated with
|
||||
// this leaf will be closed when this transaction is committed.
|
||||
nc := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: n.leaf,
|
||||
}
|
||||
if n.prefix != nil {
|
||||
nc.prefix = make([]byte, len(n.prefix))
|
||||
copy(nc.prefix, n.prefix)
|
||||
}
|
||||
if len(n.edges) != 0 {
|
||||
nc.edges = make([]edge, len(n.edges))
|
||||
copy(nc.edges, n.edges)
|
||||
}
|
||||
|
||||
// Mark this node as writable.
|
||||
t.writable.Add(nc, nil)
|
||||
return nc
|
||||
}
|
||||
|
||||
// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
|
||||
// Returns the size of the subtree visited
|
||||
func (t *Txn) trackChannelsAndCount(n *Node) int {
|
||||
// Count only leaf nodes
|
||||
leaves := 0
|
||||
if n.leaf != nil {
|
||||
leaves = 1
|
||||
}
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
leaves += t.trackChannelsAndCount(e.node)
|
||||
}
|
||||
return leaves
|
||||
}
|
||||
|
||||
// mergeChild is called to collapse the given node with its child. This is only
|
||||
// called when the given node is not a leaf and has a single edge.
|
||||
func (t *Txn) mergeChild(n *Node) {
|
||||
// Mark the child node as being mutated since we are about to abandon
|
||||
// it. We don't need to mark the leaf since we are retaining it if it
|
||||
// is there.
|
||||
e := n.edges[0]
|
||||
child := e.node
|
||||
if t.trackMutate {
|
||||
t.trackChannel(child.mutateCh)
|
||||
}
|
||||
|
||||
// Merge the nodes.
|
||||
n.prefix = concat(n.prefix, child.prefix)
|
||||
n.leaf = child.leaf
|
||||
if len(child.edges) != 0 {
|
||||
n.edges = make([]edge, len(child.edges))
|
||||
copy(n.edges, child.edges)
|
||||
} else {
|
||||
n.edges = nil
|
||||
}
|
||||
}
|
||||
|
||||
// insert does a recursive insertion
|
||||
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
|
||||
// Handle key exhaustion
|
||||
if len(search) == 0 {
|
||||
var oldVal interface{}
|
||||
didUpdate := false
|
||||
if n.isLeaf() {
|
||||
oldVal = n.leaf.val
|
||||
didUpdate = true
|
||||
}
|
||||
|
||||
nc := t.writeNode(n, true)
|
||||
nc.leaf = &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Look for the edge
|
||||
idx, child := n.getEdge(search[0])
|
||||
|
||||
// No edge, create one
|
||||
if child == nil {
|
||||
e := edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
},
|
||||
prefix: search,
|
||||
},
|
||||
}
|
||||
nc := t.writeNode(n, false)
|
||||
nc.addEdge(e)
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Determine longest prefix of the search key on match
|
||||
commonPrefix := longestPrefix(search, child.prefix)
|
||||
if commonPrefix == len(child.prefix) {
|
||||
search = search[commonPrefix:]
|
||||
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
|
||||
if newChild != nil {
|
||||
nc := t.writeNode(n, false)
|
||||
nc.edges[idx].node = newChild
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
return nil, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Split the node
|
||||
nc := t.writeNode(n, false)
|
||||
splitNode := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
nc.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: splitNode,
|
||||
})
|
||||
|
||||
// Restore the existing child node
|
||||
modChild := t.writeNode(child, false)
|
||||
splitNode.addEdge(edge{
|
||||
label: modChild.prefix[commonPrefix],
|
||||
node: modChild,
|
||||
})
|
||||
modChild.prefix = modChild.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add to to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
splitNode.leaf = leaf
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
splitNode.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// delete does a recursive deletion
|
||||
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if !n.isLeaf() {
|
||||
return nil, nil
|
||||
}
|
||||
// Copy the pointer in case we are in a transaction that already
|
||||
// modified this node since the node will be reused. Any changes
|
||||
// made to the node will not affect returning the original leaf
|
||||
// value.
|
||||
oldLeaf := n.leaf
|
||||
|
||||
// Remove the leaf node
|
||||
nc := t.writeNode(n, true)
|
||||
nc.leaf = nil
|
||||
|
||||
// Check if this node should be merged
|
||||
if n != t.root && len(nc.edges) == 1 {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
return nc, oldLeaf
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
label := search[0]
|
||||
idx, child := n.getEdge(label)
|
||||
if child == nil || !bytes.HasPrefix(search, child.prefix) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
search = search[len(child.prefix):]
|
||||
newChild, leaf := t.delete(n, child, search)
|
||||
if newChild == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
|
||||
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
|
||||
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
|
||||
// so be careful if you change any of the logic here.
|
||||
nc := t.writeNode(n, false)
|
||||
|
||||
// Delete the edge if the node has no edges
|
||||
if newChild.leaf == nil && len(newChild.edges) == 0 {
|
||||
nc.delEdge(label)
|
||||
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
} else {
|
||||
nc.edges[idx].node = newChild
|
||||
}
|
||||
return nc, leaf
|
||||
}
|
||||
|
||||
// delete does a recursive deletion
|
||||
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
nc := t.writeNode(n, true)
|
||||
if n.isLeaf() {
|
||||
nc.leaf = nil
|
||||
}
|
||||
nc.edges = nil
|
||||
return nc, t.trackChannelsAndCount(n)
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
label := search[0]
|
||||
idx, child := n.getEdge(label)
|
||||
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
|
||||
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
|
||||
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if len(child.prefix) > len(search) {
|
||||
search = []byte("")
|
||||
} else {
|
||||
search = search[len(child.prefix):]
|
||||
}
|
||||
newChild, numDeletions := t.deletePrefix(n, child, search)
|
||||
if newChild == nil {
|
||||
return nil, 0
|
||||
}
|
||||
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
|
||||
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
|
||||
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
|
||||
// so be careful if you change any of the logic here.
|
||||
|
||||
nc := t.writeNode(n, false)
|
||||
|
||||
// Delete the edge if the node has no edges
|
||||
if newChild.leaf == nil && len(newChild.edges) == 0 {
|
||||
nc.delEdge(label)
|
||||
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
} else {
|
||||
nc.edges[idx].node = newChild
|
||||
}
|
||||
return nc, numDeletions
|
||||
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the previous value and a bool indicating if any was set.
|
||||
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
|
||||
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if !didUpdate {
|
||||
t.size++
|
||||
}
|
||||
return oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the old value if any,
|
||||
// and a bool indicating if the key was set.
|
||||
func (t *Txn) Delete(k []byte) (interface{}, bool) {
|
||||
newRoot, leaf := t.delete(nil, t.root, k)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if leaf != nil {
|
||||
t.size--
|
||||
return leaf.val, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete an entire subtree that matches the prefix
|
||||
// This will delete all nodes under that prefix
|
||||
func (t *Txn) DeletePrefix(prefix []byte) bool {
|
||||
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
t.size = t.size - numDeletions
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
|
||||
// Root returns the current root of the radix tree within this
|
||||
// transaction. The root is not safe across insert and delete operations,
|
||||
// but can be used to read the current state during a transaction.
|
||||
func (t *Txn) Root() *Node {
|
||||
return t.root
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Txn) Get(k []byte) (interface{}, bool) {
|
||||
return t.root.Get(k)
|
||||
}
|
||||
|
||||
// GetWatch is used to lookup a specific key, returning
|
||||
// the watch channel, value and if it was found
|
||||
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
|
||||
return t.root.GetWatch(k)
|
||||
}
|
||||
|
||||
// Commit is used to finalize the transaction and return a new tree. If mutation
|
||||
// tracking is turned on then notifications will also be issued.
|
||||
func (t *Txn) Commit() *Tree {
|
||||
nt := t.CommitOnly()
|
||||
if t.trackMutate {
|
||||
t.Notify()
|
||||
}
|
||||
return nt
|
||||
}
|
||||
|
||||
// CommitOnly is used to finalize the transaction and return a new tree, but
|
||||
// does not issue any notifications until Notify is called.
|
||||
func (t *Txn) CommitOnly() *Tree {
|
||||
nt := &Tree{t.root, t.size}
|
||||
t.writable = nil
|
||||
return nt
|
||||
}
|
||||
|
||||
// slowNotify does a complete comparison of the before and after trees in order
|
||||
// to trigger notifications. This doesn't require any additional state but it
|
||||
// is very expensive to compute.
|
||||
func (t *Txn) slowNotify() {
|
||||
snapIter := t.snap.rawIterator()
|
||||
rootIter := t.root.rawIterator()
|
||||
for snapIter.Front() != nil || rootIter.Front() != nil {
|
||||
// If we've exhausted the nodes in the old snapshot, we know
|
||||
// there's nothing remaining to notify.
|
||||
if snapIter.Front() == nil {
|
||||
return
|
||||
}
|
||||
snapElem := snapIter.Front()
|
||||
|
||||
// If we've exhausted the nodes in the new root, we know we need
|
||||
// to invalidate everything that remains in the old snapshot. We
|
||||
// know from the loop condition there's something in the old
|
||||
// snapshot.
|
||||
if rootIter.Front() == nil {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.isLeaf() {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
snapIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// Do one string compare so we can check the various conditions
|
||||
// below without repeating the compare.
|
||||
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
|
||||
|
||||
// If the snapshot is behind the root, then we must have deleted
|
||||
// this node during the transaction.
|
||||
if cmp < 0 {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.isLeaf() {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
snapIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// If the snapshot is ahead of the root, then we must have added
|
||||
// this node during the transaction.
|
||||
if cmp > 0 {
|
||||
rootIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// If we have the same path, then we need to see if we mutated a
|
||||
// node and possibly the leaf.
|
||||
rootElem := rootIter.Front()
|
||||
if snapElem != rootElem {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
}
|
||||
snapIter.Next()
|
||||
rootIter.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is used along with TrackMutate to trigger notifications. This must
|
||||
// only be done once a transaction is committed via CommitOnly, and it is called
|
||||
// automatically by Commit.
|
||||
func (t *Txn) Notify() {
|
||||
if !t.trackMutate {
|
||||
return
|
||||
}
|
||||
|
||||
// If we've overflowed the tracking state we can't use it in any way and
|
||||
// need to do a full tree compare.
|
||||
if t.trackOverflow {
|
||||
t.slowNotify()
|
||||
} else {
|
||||
for ch := range t.trackChannels {
|
||||
close(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up the tracking state so that a re-notify is safe (will trigger
|
||||
// the else clause above which will be a no-op).
|
||||
t.trackChannels = nil
|
||||
t.trackOverflow = false
|
||||
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the new tree, previous value and a bool indicating if any was set.
|
||||
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Insert(k, v)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the new tree,
|
||||
// old value if any, and a bool indicating if the key was set.
|
||||
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Delete(k)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
|
||||
// and a bool indicating if the prefix matched any nodes
|
||||
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
|
||||
txn := t.Txn()
|
||||
ok := txn.DeletePrefix(k)
|
||||
return txn.Commit(), ok
|
||||
}
|
||||
|
||||
// Root returns the root node of the tree which can be used for richer
|
||||
// query operations.
|
||||
func (t *Tree) Root() *Node {
|
||||
return t.root
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Tree) Get(k []byte) (interface{}, bool) {
|
||||
return t.root.Get(k)
|
||||
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
|
||||
// of two strings
|
||||
func longestPrefix(k1, k2 []byte) int {
|
||||
max := len(k1)
|
||||
if l := len(k2); l < max {
|
||||
max = l
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < max; i++ {
|
||||
if k1[i] != k2[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// concat two byte slices, returning a third new copy
|
||||
func concat(a, b []byte) []byte {
|
||||
c := make([]byte, len(a)+len(b))
|
||||
copy(c, a)
|
||||
copy(c[len(a):], b)
|
||||
return c
|
||||
}
|
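For orientation, the removed package's tree-level wrappers shown above (Insert, Get, Delete) just open a transaction and commit it. A minimal usage sketch follows; it is illustrative only, not part of this diff, and assumes the upstream go-immutable-radix import path:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	// Every mutation returns a new immutable tree; the original is untouched.
	r := iradix.New()
	r, _, _ = r.Insert([]byte("foo"), 1)
	r, _, _ = r.Insert([]byte("foobar"), 2)

	if v, ok := r.Get([]byte("foo")); ok {
		fmt.Println("foo =", v) // foo = 1
	}

	// Delete returns the new tree, the old value (if any), and whether it existed.
	r2, old, existed := r.Delete([]byte("foo"))
	fmt.Println(old, existed) // 1 true

	_, still := r2.Get([]byte("foo"))
	fmt.Println("present after delete:", still) // false
}
```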
205
vendor/github.com/hashicorp/go-immutable-radix/iter.go
generated
vendored
@ -1,205 +0,0 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// Iterator is used to iterate over a set of nodes
|
||||
// in pre-order
|
||||
type Iterator struct {
|
||||
node *Node
|
||||
stack []edges
|
||||
}
|
||||
|
||||
// SeekPrefixWatch is used to seek the iterator to a given prefix
|
||||
// and returns the watch channel of the finest granularity
|
||||
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
|
||||
// Wipe the stack
|
||||
i.stack = nil
|
||||
n := i.node
|
||||
watch = n.mutateCh
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
i.node = n
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Update to the finest granularity as the search makes progress
|
||||
watch = n.mutateCh
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if bytes.HasPrefix(n.prefix, search) {
|
||||
i.node = n
|
||||
return
|
||||
} else {
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekPrefix is used to seek the iterator to a given prefix
|
||||
func (i *Iterator) SeekPrefix(prefix []byte) {
|
||||
i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
func (i *Iterator) recurseMin(n *Node) *Node {
|
||||
// Traverse to the minimum child
|
||||
if n.leaf != nil {
|
||||
return n
|
||||
}
|
||||
nEdges := len(n.edges)
|
||||
if nEdges > 1 {
|
||||
// Add all the other edges to the stack (the min node will be added as
|
||||
// we recurse)
|
||||
i.stack = append(i.stack, n.edges[1:])
|
||||
}
|
||||
if nEdges > 0 {
|
||||
return i.recurseMin(n.edges[0].node)
|
||||
}
|
||||
// Shouldn't be possible
|
||||
return nil
|
||||
}
|
||||
|
||||
// SeekLowerBound is used to seek the iterator to the smallest key that is
|
||||
// greater or equal to the given key. There is no watch variant as it's hard to
|
||||
// predict based on the radix structure which node(s) changes might affect the
|
||||
// result.
|
||||
func (i *Iterator) SeekLowerBound(key []byte) {
|
||||
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
|
||||
// go because we need only a subset of edges of many nodes in the path to the
|
||||
// leaf with the lower bound. Note that the iterator will still recurse into
|
||||
// children that we don't traverse on the way to the lower bound as it
|
||||
// walks the stack.
|
||||
i.stack = []edges{}
|
||||
// i.node starts off in the common case as pointing to the root node of the
|
||||
// tree. By the time we return we have either found a lower bound and setup
|
||||
// the stack to traverse all larger keys, or we have not and the stack and
|
||||
// node should both be nil to prevent the iterator from assuming it is just
|
||||
// iterating the whole tree from the root node. Either way this needs to end
|
||||
// up as nil so just set it here.
|
||||
n := i.node
|
||||
i.node = nil
|
||||
search := key
|
||||
|
||||
found := func(n *Node) {
|
||||
i.stack = append(i.stack, edges{edge{node: n}})
|
||||
}
|
||||
|
||||
findMin := func(n *Node) {
|
||||
n = i.recurseMin(n)
|
||||
if n != nil {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
// Compare current prefix with the search key's same-length prefix.
|
||||
var prefixCmp int
|
||||
if len(n.prefix) < len(search) {
|
||||
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
|
||||
} else {
|
||||
prefixCmp = bytes.Compare(n.prefix, search)
|
||||
}
|
||||
|
||||
if prefixCmp > 0 {
|
||||
// Prefix is larger, that means the lower bound is greater than the search
|
||||
// and from now on we need to follow the minimum path to the smallest
|
||||
// leaf under this subtree.
|
||||
findMin(n)
|
||||
return
|
||||
}
|
||||
|
||||
if prefixCmp < 0 {
|
||||
// Prefix is smaller than search prefix, that means there is no lower
|
||||
// bound
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Prefix is equal, we are still heading for an exact match. If this is a
|
||||
// leaf and an exact match we're done.
|
||||
if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix if the current node has one. Note that this is
|
||||
// safe because if n.prefix is longer than the search slice prefixCmp would
|
||||
// have been > 0 above and the method would have already returned.
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
if len(search) == 0 {
|
||||
// We've exhausted the search key, but the current node is not an exact
|
||||
// match or not a leaf. That means the leaf value, if it exists, and all
// child nodes must be strictly greater, so the smallest key in this
// subtree must be the lower bound.
|
||||
findMin(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, take the lower bound next edge.
|
||||
idx, lbNode := n.getLowerBoundEdge(search[0])
|
||||
if lbNode == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Create stack edges for all the strictly higher edges in this node.
|
||||
if idx+1 < len(n.edges) {
|
||||
i.stack = append(i.stack, n.edges[idx+1:])
|
||||
}
|
||||
|
||||
// Recurse
|
||||
n = lbNode
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next node in order
|
||||
func (i *Iterator) Next() ([]byte, interface{}, bool) {
|
||||
// Initialize our stack if needed
|
||||
if i.stack == nil && i.node != nil {
|
||||
i.stack = []edges{
|
||||
{
|
||||
edge{node: i.node},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for len(i.stack) > 0 {
|
||||
// Inspect the last element of the stack
|
||||
n := len(i.stack)
|
||||
last := i.stack[n-1]
|
||||
elem := last[0].node
|
||||
|
||||
// Update the stack
|
||||
if len(last) > 1 {
|
||||
i.stack[n-1] = last[1:]
|
||||
} else {
|
||||
i.stack = i.stack[:n-1]
|
||||
}
|
||||
|
||||
// Push the edges onto the frontier
|
||||
if len(elem.edges) > 0 {
|
||||
i.stack = append(i.stack, elem.edges)
|
||||
}
|
||||
|
||||
// Return the leaf values if any
|
||||
if elem.leaf != nil {
|
||||
return elem.leaf.key, elem.leaf.val, true
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
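A corresponding sketch for the iterator deleted above, seeking to a prefix and walking keys in order (illustrative only; assumes a populated tree `r` and `fmt` as in the previous sketch):

```go
r, _, _ = r.Insert([]byte("foo/1"), "a")
r, _, _ = r.Insert([]byte("foo/2"), "b")
r, _, _ = r.Insert([]byte("zip"), "c")

// Root() exposes the immutable root node; Iterator() walks it in pre-order.
it := r.Root().Iterator()
it.SeekPrefix([]byte("foo/"))
for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
	fmt.Printf("%s => %v\n", k, v) // foo/1 => a, then foo/2 => b
}
```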
334
vendor/github.com/hashicorp/go-immutable-radix/node.go
generated
vendored
@ -1,334 +0,0 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
|
||||
// key and value, returning if iteration should
|
||||
// be terminated.
|
||||
type WalkFn func(k []byte, v interface{}) bool
|
||||
|
||||
// leafNode is used to represent a value
|
||||
type leafNode struct {
|
||||
mutateCh chan struct{}
|
||||
key []byte
|
||||
val interface{}
|
||||
}
|
||||
|
||||
// edge is used to represent an edge node
|
||||
type edge struct {
|
||||
label byte
|
||||
node *Node
|
||||
}
|
||||
|
||||
// Node is an immutable node in the radix tree
|
||||
type Node struct {
|
||||
// mutateCh is closed if this node is modified
|
||||
mutateCh chan struct{}
|
||||
|
||||
// leaf is used to store possible leaf
|
||||
leaf *leafNode
|
||||
|
||||
// prefix is the common prefix we ignore
|
||||
prefix []byte
|
||||
|
||||
// Edges should be stored in-order for iteration.
|
||||
// We avoid a fully materialized slice to save memory,
|
||||
// since in most cases we expect to be sparse
|
||||
edges edges
|
||||
}
|
||||
|
||||
func (n *Node) isLeaf() bool {
|
||||
return n.leaf != nil
|
||||
}
|
||||
|
||||
func (n *Node) addEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
n.edges = append(n.edges, e)
|
||||
if idx != num {
|
||||
copy(n.edges[idx+1:], n.edges[idx:num])
|
||||
n.edges[idx] = e
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) replaceEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == e.label {
|
||||
n.edges[idx].node = e.node
|
||||
return
|
||||
}
|
||||
panic("replacing missing edge")
|
||||
}
|
||||
|
||||
func (n *Node) getEdge(label byte) (int, *Node) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
return idx, n.edges[idx].node
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
// we want lower bound behavior so return even if it's not an exact match
|
||||
if idx < num {
|
||||
return idx, n.edges[idx].node
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (n *Node) delEdge(label byte) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
copy(n.edges[idx:], n.edges[idx+1:])
|
||||
n.edges[len(n.edges)-1] = edge{}
|
||||
n.edges = n.edges[:len(n.edges)-1]
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
|
||||
search := k
|
||||
watch := n.mutateCh
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.mutateCh, n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Update to the finest granularity as the search makes progress
|
||||
watch = n.mutateCh
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return watch, nil, false
|
||||
}
|
||||
|
||||
func (n *Node) Get(k []byte) (interface{}, bool) {
|
||||
_, val, ok := n.GetWatch(k)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// LongestPrefix is like Get, but instead of an
|
||||
// exact match, it will return the longest prefix match.
|
||||
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
|
||||
var last *leafNode
|
||||
search := k
|
||||
for {
|
||||
// Look for a leaf node
|
||||
if n.isLeaf() {
|
||||
last = n.leaf
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if last != nil {
|
||||
return last.key, last.val, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Minimum is used to return the minimum value in the tree
|
||||
func (n *Node) Minimum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
if len(n.edges) > 0 {
|
||||
n = n.edges[0].node
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Maximum is used to return the maximum value in the tree
|
||||
func (n *Node) Maximum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if num := len(n.edges); num > 0 {
|
||||
n = n.edges[num-1].node
|
||||
continue
|
||||
}
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Iterator is used to return an iterator at
|
||||
// the given node to walk the tree
|
||||
func (n *Node) Iterator() *Iterator {
|
||||
return &Iterator{node: n}
|
||||
}
|
||||
|
||||
// ReverseIterator is used to return an iterator at
|
||||
// the given node to walk the tree backwards
|
||||
func (n *Node) ReverseIterator() *ReverseIterator {
|
||||
return NewReverseIterator(n)
|
||||
}
|
||||
|
||||
// rawIterator is used to return a raw iterator at the given node to walk the
|
||||
// tree.
|
||||
func (n *Node) rawIterator() *rawIterator {
|
||||
iter := &rawIterator{node: n}
|
||||
iter.Next()
|
||||
return iter
|
||||
}
|
||||
|
||||
// Walk is used to walk the tree
|
||||
func (n *Node) Walk(fn WalkFn) {
|
||||
recursiveWalk(n, fn)
|
||||
}
|
||||
|
||||
// WalkBackwards is used to walk the tree in reverse order
|
||||
func (n *Node) WalkBackwards(fn WalkFn) {
|
||||
reverseRecursiveWalk(n, fn)
|
||||
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix
|
||||
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if bytes.HasPrefix(n.prefix, search) {
|
||||
// Child may be under our search prefix
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
|
||||
// from the root down to a given leaf. Where WalkPrefix walks
|
||||
// all the entries *under* the given prefix, this walks the
|
||||
// entries *above* the given prefix.
|
||||
func (n *Node) WalkPath(path []byte, fn WalkFn) {
|
||||
search := path
|
||||
for {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk(n *Node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
if recursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// reverseRecursiveWalk is used to do a reverse pre-order
|
||||
// walk of a node recursively. Returns true if the walk
|
||||
// should be aborted
|
||||
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children in reverse order
|
||||
for i := len(n.edges) - 1; i >= 0; i-- {
|
||||
e := n.edges[i]
|
||||
if reverseRecursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
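The walk helpers removed above take a `WalkFn` callback; returning true aborts the walk. A short sketch, illustrative only and under the same assumptions as the earlier snippets:

```go
// Visit every entry stored under "foo/".
r.Root().WalkPrefix([]byte("foo/"), func(k []byte, v interface{}) bool {
	fmt.Printf("under prefix: %s => %v\n", k, v)
	return false // keep walking
})

// WalkPath instead visits only the entries on the path from the root
// down to the given key.
r.Root().WalkPath([]byte("foo/2"), func(k []byte, v interface{}) bool {
	fmt.Printf("on path: %s => %v\n", k, v)
	return false
})
```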
78
vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
generated
vendored
@ -1,78 +0,0 @@
|
||||
package iradix
|
||||
|
||||
// rawIterator visits each of the nodes in the tree, even the ones that are not
|
||||
// leaves. It keeps track of the effective path (what a leaf at a given node
|
||||
// would be called), which is useful for comparing trees.
|
||||
type rawIterator struct {
|
||||
// node is the starting node in the tree for the iterator.
|
||||
node *Node
|
||||
|
||||
// stack keeps track of edges in the frontier.
|
||||
stack []rawStackEntry
|
||||
|
||||
// pos is the current position of the iterator.
|
||||
pos *Node
|
||||
|
||||
// path is the effective path of the current iterator position,
|
||||
// regardless of whether the current node is a leaf.
|
||||
path string
|
||||
}
|
||||
|
||||
// rawStackEntry is used to keep track of the cumulative common path as well as
|
||||
// its associated edges in the frontier.
|
||||
type rawStackEntry struct {
|
||||
path string
|
||||
edges edges
|
||||
}
|
||||
|
||||
// Front returns the current node that has been iterated to.
|
||||
func (i *rawIterator) Front() *Node {
|
||||
return i.pos
|
||||
}
|
||||
|
||||
// Path returns the effective path of the current node, even if it's not actually
|
||||
// a leaf.
|
||||
func (i *rawIterator) Path() string {
|
||||
return i.path
|
||||
}
|
||||
|
||||
// Next advances the iterator to the next node.
|
||||
func (i *rawIterator) Next() {
|
||||
// Initialize our stack if needed.
|
||||
if i.stack == nil && i.node != nil {
|
||||
i.stack = []rawStackEntry{
|
||||
{
|
||||
edges: edges{
|
||||
edge{node: i.node},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for len(i.stack) > 0 {
|
||||
// Inspect the last element of the stack.
|
||||
n := len(i.stack)
|
||||
last := i.stack[n-1]
|
||||
elem := last.edges[0].node
|
||||
|
||||
// Update the stack.
|
||||
if len(last.edges) > 1 {
|
||||
i.stack[n-1].edges = last.edges[1:]
|
||||
} else {
|
||||
i.stack = i.stack[:n-1]
|
||||
}
|
||||
|
||||
// Push the edges onto the frontier.
|
||||
if len(elem.edges) > 0 {
|
||||
path := last.path + string(elem.prefix)
|
||||
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
|
||||
}
|
||||
|
||||
i.pos = elem
|
||||
i.path = last.path + string(elem.prefix)
|
||||
return
|
||||
}
|
||||
|
||||
i.pos = nil
|
||||
i.path = ""
|
||||
}
|
239
vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
generated
vendored
@ -1,239 +0,0 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// ReverseIterator is used to iterate over a set of nodes
|
||||
// in reverse in-order
|
||||
type ReverseIterator struct {
|
||||
i *Iterator
|
||||
|
||||
// expandedParents stores the set of parent nodes whose relevant children have
|
||||
// already been pushed into the stack. This can happen during seek or during
|
||||
// iteration.
|
||||
//
|
||||
// Unlike forward iteration we need to recurse into children before we can
|
||||
// output the value stored in an internal leaf since all children are greater.
|
||||
// We use this to track whether we have already ensured all the children are
|
||||
// in the stack.
|
||||
expandedParents map[*Node]struct{}
|
||||
}
|
||||
|
||||
// NewReverseIterator returns a new ReverseIterator at a node
|
||||
func NewReverseIterator(n *Node) *ReverseIterator {
|
||||
return &ReverseIterator{
|
||||
i: &Iterator{node: n},
|
||||
}
|
||||
}
|
||||
|
||||
// SeekPrefixWatch is used to seek the iterator to a given prefix
|
||||
// and returns the watch channel of the finest granularity
|
||||
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
|
||||
return ri.i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
// SeekPrefix is used to seek the iterator to a given prefix
|
||||
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
|
||||
ri.i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
// SeekReverseLowerBound is used to seek the iterator to the largest key that is
|
||||
// lower or equal to the given key. There is no watch variant as it's hard to
|
||||
// predict based on the radix structure which node(s) changes might affect the
|
||||
// result.
|
||||
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
|
||||
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
|
||||
// go because we need only a subset of edges of many nodes in the path to the
|
||||
// leaf with the lower bound. Note that the iterator will still recurse into
|
||||
// children that we don't traverse on the way to the reverse lower bound as it
|
||||
// walks the stack.
|
||||
ri.i.stack = []edges{}
|
||||
// ri.i.node starts off in the common case as pointing to the root node of the
|
||||
// tree. By the time we return we have either found a lower bound and setup
|
||||
// the stack to traverse all larger keys, or we have not and the stack and
|
||||
// node should both be nil to prevent the iterator from assuming it is just
|
||||
// iterating the whole tree from the root node. Either way this needs to end
|
||||
// up as nil so just set it here.
|
||||
n := ri.i.node
|
||||
ri.i.node = nil
|
||||
search := key
|
||||
|
||||
if ri.expandedParents == nil {
|
||||
ri.expandedParents = make(map[*Node]struct{})
|
||||
}
|
||||
|
||||
found := func(n *Node) {
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
// We need to mark this node as expanded in advance too otherwise the
|
||||
// iterator will attempt to walk all of its children even though they are
|
||||
// greater than the lower bound we have found. We've expanded it in the
|
||||
// sense that all of its children that we want to walk are already in the
|
||||
// stack (i.e. none of them).
|
||||
ri.expandedParents[n] = struct{}{}
|
||||
}
|
||||
|
||||
for {
|
||||
// Compare current prefix with the search key's same-length prefix.
|
||||
var prefixCmp int
|
||||
if len(n.prefix) < len(search) {
|
||||
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
|
||||
} else {
|
||||
prefixCmp = bytes.Compare(n.prefix, search)
|
||||
}
|
||||
|
||||
if prefixCmp < 0 {
|
||||
// Prefix is smaller than search prefix, that means there is no exact
|
||||
// match for the search key. But we are looking in reverse, so the reverse
|
||||
// lower bound will be the largest leaf under this subtree, since it is
|
||||
// the value that would come right before the current search key if it
|
||||
// were in the tree. So we need to follow the maximum path in this subtree
|
||||
// to find it. Note that this is exactly what the iterator will already do
|
||||
// if it finds a node in the stack that has _not_ been marked as expanded
|
||||
// so in this one case we don't call `found` and instead let the iterator
|
||||
// do the expansion and recursion through all the children.
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
return
|
||||
}
|
||||
|
||||
if prefixCmp > 0 {
|
||||
// Prefix is larger than search prefix, or there is no prefix but we've
|
||||
// also exhausted the search key. Either way, that means there is no
|
||||
// reverse lower bound since nothing comes before our current search
|
||||
// prefix.
|
||||
return
|
||||
}
|
||||
|
||||
// If this is a leaf, something needs to happen! Note that if it's a leaf
|
||||
// and prefixCmp was zero (which it must be to get here) then the leaf value
|
||||
// is either an exact match for the search, or it's lower. It can't be
|
||||
// greater.
|
||||
if n.isLeaf() {
|
||||
|
||||
// Firstly, if it's an exact match, we're done!
|
||||
if bytes.Equal(n.leaf.key, key) {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// It's not so this node's leaf value must be lower and could still be a
|
||||
// valid contender for reverse lower bound.
|
||||
|
||||
// If it has no children then we are also done.
|
||||
if len(n.edges) == 0 {
|
||||
// This leaf is the lower bound.
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Finally, this leaf is internal (has children) so we'll keep searching,
|
||||
// but we need to add it to the iterator's stack since it has a leaf value
|
||||
// that needs to be iterated over. It needs to be added to the stack
|
||||
// before its children below as it comes first.
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
// We also need to mark it as expanded since we'll be adding any of its
|
||||
// relevant children below and so don't want the iterator to re-add them
|
||||
// on its way back up the stack.
|
||||
ri.expandedParents[n] = struct{}{}
|
||||
}
|
||||
|
||||
// Consume the search prefix. Note that this is safe because if n.prefix is
|
||||
// longer than the search slice prefixCmp would have been > 0 above and the
|
||||
// method would have already returned.
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
if len(search) == 0 {
|
||||
// We've exhausted the search key but we are not at a leaf. That means all
|
||||
// children are greater than the search key so a reverse lower bound
|
||||
// doesn't exist in this subtree. Note that there might still be one in
|
||||
// the whole radix tree by following a different path somewhere further
|
||||
// up. If that's the case then the iterator's stack will contain all the
|
||||
// smaller nodes already and Previous will walk through them correctly.
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, take the lower bound next edge.
|
||||
idx, lbNode := n.getLowerBoundEdge(search[0])
|
||||
|
||||
// From here, we need to update the stack with all values lower than
|
||||
// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
|
||||
// search prefix is larger than all edges, we need to place idx at the
|
||||
// last edge index so they can all be placed in the stack, since they
|
||||
// come before our search prefix.
|
||||
if idx == -1 {
|
||||
idx = len(n.edges)
|
||||
}
|
||||
|
||||
// Create stack edges for all the strictly lower edges in this node.
|
||||
if len(n.edges[:idx]) > 0 {
|
||||
ri.i.stack = append(ri.i.stack, n.edges[:idx])
|
||||
}
|
||||
|
||||
// Exit if there's no lower bound edge. The stack will have the previous
|
||||
// nodes already.
|
||||
if lbNode == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Recurse
|
||||
n = lbNode
|
||||
}
|
||||
}
|
||||
|
||||
// Previous returns the previous node in reverse order
|
||||
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
|
||||
// Initialize our stack if needed
|
||||
if ri.i.stack == nil && ri.i.node != nil {
|
||||
ri.i.stack = []edges{
|
||||
{
|
||||
edge{node: ri.i.node},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if ri.expandedParents == nil {
|
||||
ri.expandedParents = make(map[*Node]struct{})
|
||||
}
|
||||
|
||||
for len(ri.i.stack) > 0 {
|
||||
// Inspect the last element of the stack
|
||||
n := len(ri.i.stack)
|
||||
last := ri.i.stack[n-1]
|
||||
m := len(last)
|
||||
elem := last[m-1].node
|
||||
|
||||
_, alreadyExpanded := ri.expandedParents[elem]
|
||||
|
||||
// If this is an internal node and we've not seen it already, we need to
|
||||
// leave it in the stack so we can return its possible leaf value _after_
|
||||
// we've recursed through all its children.
|
||||
if len(elem.edges) > 0 && !alreadyExpanded {
|
||||
// record that we've seen this node!
|
||||
ri.expandedParents[elem] = struct{}{}
|
||||
// push child edges onto stack and skip the rest of the loop to recurse
|
||||
// into the largest one.
|
||||
ri.i.stack = append(ri.i.stack, elem.edges)
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove the node from the stack
|
||||
if m > 1 {
|
||||
ri.i.stack[n-1] = last[:m-1]
|
||||
} else {
|
||||
ri.i.stack = ri.i.stack[:n-1]
|
||||
}
|
||||
// We don't need this state any more as it's no longer in the stack so we
|
||||
// won't visit it again
|
||||
if alreadyExpanded {
|
||||
delete(ri.expandedParents, elem)
|
||||
}
|
||||
|
||||
// If this is a leaf, return it
|
||||
if elem.leaf != nil {
|
||||
return elem.leaf.key, elem.leaf.val, true
|
||||
}
|
||||
|
||||
// it's not a leaf so keep walking the stack to find the previous leaf
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
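And the reverse iterator deleted above supports seeking to the largest key less than or equal to a bound and walking backwards; a brief sketch under the same assumptions as the earlier snippets:

```go
ri := r.Root().ReverseIterator()
ri.SeekReverseLowerBound([]byte("foo/2"))
for k, v, ok := ri.Previous(); ok; k, v, ok = ri.Previous() {
	fmt.Printf("%s => %v\n", k, v) // keys <= "foo/2", largest first
}
```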
19
vendor/github.com/hashicorp/go-retryablehttp/README.md
generated
vendored
@ -45,6 +45,25 @@ The returned response object is an `*http.Response`, the same thing you would
usually get from `net/http`. Had the request failed one or more times, the above
call would block and retry with exponential backoff.

## Retrying cases that fail after a seeming success

It's possible for a request to succeed in the sense that the expected response headers are received, but then to encounter network-level errors while reading the response body. In go-retryablehttp's most basic usage, this error would not be retryable, due to the out-of-band handling of the response body. In some cases it may be desirable to handle the response body as part of the retryable operation.

A toy example (which will retry the full request and succeed on the second attempt) is shown below:

```go
c := retryablehttp.NewClient()
r, _ := retryablehttp.NewRequest("GET", "://foo", nil)
handlerShouldRetry := true
r.SetResponseHandler(func(*http.Response) error {
	if !handlerShouldRetry {
		return nil
	}
	handlerShouldRetry = false
	return errors.New("retryable error")
})
```

## Getting a stdlib `*http.Client` with retries

It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
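
A minimal sketch of that conversion, assuming this vendored version already ships the upstream `StandardClient()` helper (the returned `*http.Client` routes every request through the retrying client via its transport):

```go
retryClient := retryablehttp.NewClient()
retryClient.RetryMax = 10

standardClient := retryClient.StandardClient() // *http.Client with retries
resp, err := standardClient.Get("https://example.com")
if err != nil {
	// handle error
}
defer resp.Body.Close()
```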
85
vendor/github.com/hashicorp/go-retryablehttp/client.go
generated
vendored
@ -69,11 +69,21 @@ var (
|
||||
// scheme specified in the URL is invalid. This error isn't typed
|
||||
// specifically so we resort to matching on the error string.
|
||||
schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
|
||||
|
||||
// A regular expression to match the error returned by net/http when the
|
||||
// TLS certificate is not trusted. This error isn't typed
|
||||
// specifically so we resort to matching on the error string.
|
||||
notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
|
||||
)
|
||||
|
||||
// ReaderFunc is the type of function that can be given natively to NewRequest
|
||||
type ReaderFunc func() (io.Reader, error)
|
||||
|
||||
// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it.
|
||||
// It only runs if the initial part of the request was successful.
|
||||
// If an error is returned, the client's retry policy will be used to determine whether to retry the whole request.
|
||||
type ResponseHandlerFunc func(*http.Response) error
|
||||
|
||||
// LenReader is an interface implemented by many in-memory io.Reader's. Used
|
||||
// for automatically sending the right Content-Length header when possible.
|
||||
type LenReader interface {
|
||||
@ -86,6 +96,8 @@ type Request struct {
|
||||
// used to rewind the request data in between retries.
|
||||
body ReaderFunc
|
||||
|
||||
responseHandler ResponseHandlerFunc
|
||||
|
||||
// Embed an HTTP request directly. This makes a *Request act exactly
|
||||
// like an *http.Request so that all meta methods are supported.
|
||||
*http.Request
|
||||
@ -94,8 +106,16 @@ type Request struct {
|
||||
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
|
||||
// with its context changed to ctx. The provided ctx must be non-nil.
|
||||
func (r *Request) WithContext(ctx context.Context) *Request {
|
||||
r.Request = r.Request.WithContext(ctx)
|
||||
return r
|
||||
return &Request{
|
||||
body: r.body,
|
||||
responseHandler: r.responseHandler,
|
||||
Request: r.Request.WithContext(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
// SetResponseHandler allows setting the response handler.
|
||||
func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
|
||||
r.responseHandler = fn
|
||||
}
|
||||
|
||||
// BodyBytes allows accessing the request body. It is an analogue to
|
||||
@ -252,23 +272,31 @@ func FromRequest(r *http.Request) (*Request, error) {
|
||||
return nil, err
|
||||
}
|
||||
// Could assert contentLength == r.ContentLength
|
||||
return &Request{bodyReader, r}, nil
|
||||
return &Request{body: bodyReader, Request: r}, nil
|
||||
}
|
||||
|
||||
// NewRequest creates a new wrapped request.
|
||||
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
|
||||
return NewRequestWithContext(context.Background(), method, url, rawBody)
|
||||
}
|
||||
|
||||
// NewRequestWithContext creates a new wrapped request with the provided context.
|
||||
//
|
||||
// The context controls the entire lifetime of a request and its response:
|
||||
// obtaining a connection, sending the request, and reading the response headers and body.
|
||||
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
|
||||
bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
httpReq, err := http.NewRequest(method, url, nil)
|
||||
httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
httpReq.ContentLength = contentLength
|
||||
|
||||
return &Request{bodyReader, httpReq}, nil
|
||||
return &Request{body: bodyReader, Request: httpReq}, nil
|
||||
}
|
||||
|
||||
// Logger interface allows to use other loggers than
|
||||
@ -435,6 +463,9 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
}
|
||||
|
||||
// Don't retry if the error was due to TLS cert verification failure.
|
||||
if notTrustedErrorRe.MatchString(v.Error()) {
|
||||
return false, v
|
||||
}
|
||||
if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
|
||||
return false, v
|
||||
}
|
||||
@ -455,7 +486,7 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
// the server time to recover, as 500's are typically not permanent
|
||||
// errors and may relate to outages on the server side. This will catch
|
||||
// invalid response codes as well, like 0 and 999.
|
||||
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
|
||||
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
|
||||
return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
|
||||
}
|
||||
|
||||
@ -555,13 +586,12 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
var resp *http.Response
|
||||
var attempt int
|
||||
var shouldRetry bool
|
||||
var doErr, checkErr error
|
||||
var doErr, respErr, checkErr error
|
||||
|
||||
for i := 0; ; i++ {
|
||||
doErr, respErr = nil, nil
|
||||
attempt++
|
||||
|
||||
var code int // HTTP response code
|
||||
|
||||
// Always rewind the request body when non-nil.
|
||||
if req.body != nil {
|
||||
body, err := req.body()
|
||||
@ -589,19 +619,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
|
||||
// Attempt the request
|
||||
resp, doErr = c.HTTPClient.Do(req.Request)
|
||||
if resp != nil {
|
||||
code = resp.StatusCode
|
||||
}
|
||||
|
||||
// Check if we should continue with retries.
|
||||
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
|
||||
if !shouldRetry && doErr == nil && req.responseHandler != nil {
|
||||
respErr = req.responseHandler(resp)
|
||||
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr)
|
||||
}
|
||||
|
||||
if doErr != nil {
|
||||
err := doErr
|
||||
if respErr != nil {
|
||||
err = respErr
|
||||
}
|
||||
if err != nil {
|
||||
switch v := logger.(type) {
|
||||
case LeveledLogger:
|
||||
v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL)
|
||||
v.Error("request failed", "error", err, "method", req.Method, "url", req.URL)
|
||||
case Logger:
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr)
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
|
||||
}
|
||||
} else {
|
||||
// Call this here to maintain the behavior of logging all requests,
|
||||
@ -636,11 +671,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
|
||||
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
|
||||
if code > 0 {
|
||||
desc = fmt.Sprintf("%s (status: %d)", desc, code)
|
||||
}
|
||||
if logger != nil {
|
||||
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
|
||||
if resp != nil {
|
||||
desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode)
|
||||
}
|
||||
switch v := logger.(type) {
|
||||
case LeveledLogger:
|
||||
v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
|
||||
@ -648,11 +683,13 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||
}
|
||||
}
|
||||
timer := time.NewTimer(wait)
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
timer.Stop()
|
||||
c.HTTPClient.CloseIdleConnections()
|
||||
return nil, req.Context().Err()
|
||||
case <-time.After(wait):
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
// Make shallow copy of http Request so that we can modify its body
|
||||
@ -662,15 +699,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
// this is the closest we have to success criteria
|
||||
if doErr == nil && checkErr == nil && !shouldRetry {
|
||||
if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
defer c.HTTPClient.CloseIdleConnections()
|
||||
|
||||
err := doErr
|
||||
var err error
|
||||
if checkErr != nil {
|
||||
err = checkErr
|
||||
} else if respErr != nil {
|
||||
err = respErr
|
||||
} else {
|
||||
err = doErr
|
||||
}
|
||||
|
||||
if c.ErrorHandler != nil {
|
||||
|
362
vendor/github.com/hashicorp/golang-lru/LICENSE
generated
vendored
@ -1,362 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
177
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
177
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
@ -1,177 +0,0 @@
|
||||
package simplelru
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
type EvictCallback func(key interface{}, value interface{})
|
||||
|
||||
// LRU implements a non-thread safe fixed size LRU cache
|
||||
type LRU struct {
|
||||
size int
|
||||
evictList *list.List
|
||||
items map[interface{}]*list.Element
|
||||
onEvict EvictCallback
|
||||
}
|
||||
|
||||
// entry is used to hold a value in the evictList
|
||||
type entry struct {
|
||||
key interface{}
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// NewLRU constructs an LRU of the given size
|
||||
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
|
||||
if size <= 0 {
|
||||
return nil, errors.New("Must provide a positive size")
|
||||
}
|
||||
c := &LRU{
|
||||
size: size,
|
||||
evictList: list.New(),
|
||||
items: make(map[interface{}]*list.Element),
|
||||
onEvict: onEvict,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *LRU) Purge() {
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.Value.(*entry).value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
ent.Value.(*entry).value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := &entry{key, value}
|
||||
entry := c.evictList.PushFront(ent)
|
||||
c.items[key] = entry
|
||||
|
||||
evict := c.evictList.Len() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
}
|
||||
return evict
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
if ent.Value.(*entry) == nil {
|
||||
return nil, false
|
||||
}
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
||||
// or deleting it for being stale.
|
||||
func (c *LRU) Contains(key interface{}) (ok bool) {
|
||||
_, ok = c.items[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
var ent *list.Element
|
||||
if ent, ok = c.items[key]; ok {
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return nil, ok
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache, returning if the
|
||||
// key was contained.
|
||||
func (c *LRU) Remove(key interface{}) (present bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.removeElement(ent)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *LRU) Keys() []interface{} {
|
||||
keys := make([]interface{}, len(c.items))
|
||||
i := 0
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
|
||||
keys[i] = ent.Value.(*entry).key
|
||||
i++
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU) Len() int {
|
||||
return c.evictList.Len()
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *LRU) Resize(size int) (evicted int) {
|
||||
diff := c.Len() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU) removeOldest() {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache
|
||||
func (c *LRU) removeElement(e *list.Element) {
|
||||
c.evictList.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.items, kv.key)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(kv.key, kv.value)
|
||||
}
|
||||
}
|
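For reference, the simplelru package removed above exposes a small constructor-plus-methods API. A minimal usage sketch, illustrative only and written against the signatures shown in the removed file:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires whenever an entry is pushed out or removed.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // capacity is 2, so "a" (least recently used) is evicted

	_, ok := cache.Get("a")
	fmt.Println("a present:", ok)           // false
	fmt.Println("cache size:", cache.Len()) // 2
}
```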
39
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
39
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
@ -1,39 +0,0 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Checks if a key exists in cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clears all cache entries.
	Purge()

	// Resizes cache, returning number evicted
	Resize(int) int
}
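Because callers of the removed package could depend on the LRUCache interface rather than the concrete *LRU type, either the basic or a thread-safe implementation can be swapped in. A short, hypothetical helper illustrating that (the helper name and seed map are not part of the library):

```go
package main

import "github.com/hashicorp/golang-lru/simplelru"

// warmCache pre-populates any simplelru.LRUCache implementation; the helper
// name and the idea of a seed map are illustrative, not part of the library.
func warmCache(c simplelru.LRUCache, seed map[interface{}]interface{}) {
	for k, v := range seed {
		c.Add(k, v)
	}
}

func main() {
	cache, _ := simplelru.NewLRU(128, nil)
	warmCache(cache, map[interface{}]interface{}{"hello": "world"})
}
```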
12
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
12
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
@ -632,12 +632,16 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
 	// fill unusedNodeKeys with keys from the AST
 	// a slice because we have to do equals case fold to match Filter
 	unusedNodeKeys := make(map[string][]token.Pos, 0)
-	for _, item := range list.Items {
-		for _, k := range item.Keys{
-			if k.Token.JSON || k.Token.Type == token.IDENT {
+	for i, item := range list.Items {
+		for _, k := range item.Keys {
+			// isNestedJSON returns true for e.g. bar in
+			// { "foo": { "bar": {...} } }
+			// This isn't an unused node key, so we want to skip it
+			isNestedJSON := i > 0 && len(item.Keys) > 1
+			if !isNestedJSON && (k.Token.JSON || k.Token.Type == token.IDENT) {
 				fn := k.Token.Value().(string)
 				sl := unusedNodeKeys[fn]
 				unusedNodeKeys[fn] = append(sl, k.Token.Pos)
 			}
 		}
 	}
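The hunk above changes how github.com/hashicorp/hcl records unused keys when the input is JSON: a key that is merely the inner part of a nested JSON object (such as bar in { "foo": { "bar": ... } }) is no longer counted as unused. A hedged sketch of the decoding path this affects, using the package's public hcl.Decode entry point; the Config struct and its fields are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config and its fields are illustrative only.
type Config struct {
	Foo struct {
		Bar string `hcl:"bar"`
	} `hcl:"foo"`
}

func main() {
	// JSON is accepted as HCL input; "bar" here is exactly the kind of
	// nested JSON key the new isNestedJSON check skips when collecting
	// unused node keys.
	input := `{ "foo": { "bar": "baz" } }`

	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Foo.Bar) // baz
}
```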
363
vendor/github.com/hashicorp/vault/LICENSE
generated
vendored
363
vendor/github.com/hashicorp/vault/LICENSE
generated
vendored
@ -1,363 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
61
vendor/github.com/hashicorp/vault/api/auth/approle/LICENSE
generated
vendored
Normal file
61
vendor/github.com/hashicorp/vault/api/auth/approle/LICENSE
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
|
||||
“Business Source License” is a trademark of MariaDB Corporation Ab.
|
||||
|
||||
Parameters
|
||||
|
||||
Licensor: HashiCorp, Inc.
|
||||
Licensed Work: Vault 1.15.0. The Licensed Work is (c) 2023 HashiCorp, Inc.
|
||||
Additional Use Grant: You may make production use of the Licensed Work,
|
||||
provided such use does not include offering the Licensed Work
|
||||
to third parties on a hosted or embedded basis which is
|
||||
competitive with HashiCorp's products.
|
||||
Change Date: Four years from the date the Licensed Work is published.
|
||||
Change License: MPL 2.0
|
||||
|
||||
For information about alternative licensing arrangements for the Licensed Work,
|
||||
please contact licensing@hashicorp.com.
|
||||
|
||||
Notice
|
||||
|
||||
Business Source License 1.1
|
||||
|
||||
Terms
|
||||
|
||||
The Licensor hereby grants you the right to copy, modify, create derivative
|
||||
works, redistribute, and make non-production use of the Licensed Work. The
|
||||
Licensor may make an Additional Use Grant, above, permitting limited production use.
|
||||
|
||||
Effective on the Change Date, or the fourth anniversary of the first publicly
|
||||
available distribution of a specific version of the Licensed Work under this
|
||||
License, whichever comes first, the Licensor hereby grants you rights under
|
||||
the terms of the Change License, and the rights granted in the paragraph
|
||||
above terminate.
|
||||
|
||||
If your use of the Licensed Work does not comply with the requirements
|
||||
currently in effect as described in this License, you must purchase a
|
||||
commercial license from the Licensor, its affiliated entities, or authorized
|
||||
resellers, or you must refrain from using the Licensed Work.
|
||||
|
||||
All copies of the original and modified Licensed Work, and derivative works
|
||||
of the Licensed Work, are subject to this License. This License applies
|
||||
separately for each version of the Licensed Work and the Change Date may vary
|
||||
for each version of the Licensed Work released by Licensor.
|
||||
|
||||
You must conspicuously display this License on each original or modified copy
|
||||
of the Licensed Work. If you receive the Licensed Work in original or
|
||||
modified form from a third party, the terms and conditions set forth in this
|
||||
License apply to your use of that work.
|
||||
|
||||
Any use of the Licensed Work in violation of this License will automatically
|
||||
terminate your rights under this License for the current and all other
|
||||
versions of the Licensed Work.
|
||||
|
||||
This License does not grant you any right in any trademark or logo of
|
||||
Licensor or its affiliates (provided that you may use a trademark or logo of
|
||||
Licensor as expressly required by this License).
|
||||
|
||||
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
|
||||
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
|
||||
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
|
||||
TITLE.
|
208
vendor/github.com/hashicorp/vault/api/auth/approle/approle.go
generated
vendored
Normal file
208
vendor/github.com/hashicorp/vault/api/auth/approle/approle.go
generated
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package approle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/vault/api"
|
||||
)
|
||||
|
||||
type AppRoleAuth struct {
|
||||
mountPath string
|
||||
roleID string
|
||||
secretID string
|
||||
secretIDFile string
|
||||
secretIDEnv string
|
||||
unwrap bool
|
||||
}
|
||||
|
||||
var _ api.AuthMethod = (*AppRoleAuth)(nil)
|
||||
|
||||
// SecretID is a struct that allows you to specify where your application is
|
||||
// storing the secret ID required for login to the AppRole auth method. The
|
||||
// recommended secure pattern is to use response-wrapping tokens rather than
|
||||
// a plaintext value, by passing WithWrappingToken() to NewAppRoleAuth.
|
||||
// https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices
|
||||
type SecretID struct {
|
||||
// Path on the file system where the secret ID can be found.
|
||||
FromFile string
|
||||
// The name of the environment variable containing the application's
|
||||
// secret ID.
|
||||
FromEnv string
|
||||
// The secret ID as a plaintext string value.
|
||||
FromString string
|
||||
}
|
||||
|
||||
type LoginOption func(a *AppRoleAuth) error
|
||||
|
||||
const (
|
||||
defaultMountPath = "approle"
|
||||
)
|
||||
|
||||
// NewAppRoleAuth initializes a new AppRole auth method interface to be
|
||||
// passed as a parameter to the client.Auth().Login method.
|
||||
//
|
||||
// For a secret ID, the recommended secure pattern is to unwrap a one-time-use
|
||||
// response-wrapping token that was placed here by a trusted orchestrator
|
||||
// (https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices)
|
||||
// To indicate that the filepath points to this wrapping token and not just
|
||||
// a plaintext secret ID, initialize NewAppRoleAuth with the
|
||||
// WithWrappingToken LoginOption.
|
||||
//
|
||||
// Supported options: WithMountPath, WithWrappingToken
|
||||
func NewAppRoleAuth(roleID string, secretID *SecretID, opts ...LoginOption) (*AppRoleAuth, error) {
|
||||
if roleID == "" {
|
||||
return nil, fmt.Errorf("no role ID provided for login")
|
||||
}
|
||||
|
||||
if secretID == nil {
|
||||
return nil, fmt.Errorf("no secret ID provided for login")
|
||||
}
|
||||
|
||||
err := secretID.validate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid secret ID: %w", err)
|
||||
}
|
||||
|
||||
a := &AppRoleAuth{
|
||||
mountPath: defaultMountPath,
|
||||
roleID: roleID,
|
||||
}
|
||||
|
||||
// secret ID will be read in at login time if it comes from a file or environment variable, in case the underlying value changes
|
||||
if secretID.FromFile != "" {
|
||||
a.secretIDFile = secretID.FromFile
|
||||
}
|
||||
|
||||
if secretID.FromEnv != "" {
|
||||
a.secretIDEnv = secretID.FromEnv
|
||||
}
|
||||
|
||||
if secretID.FromString != "" {
|
||||
a.secretID = secretID.FromString
|
||||
}
|
||||
|
||||
// Loop through each option
|
||||
for _, opt := range opts {
|
||||
// Call the option giving the instantiated
|
||||
// *AppRoleAuth as the argument
|
||||
err := opt(a)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error with login option: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// return the modified auth struct instance
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (a *AppRoleAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
loginData := map[string]interface{}{
|
||||
"role_id": a.roleID,
|
||||
}
|
||||
|
||||
var secretIDValue string
|
||||
|
||||
switch {
|
||||
case a.secretIDFile != "":
|
||||
s, err := a.readSecretIDFromFile()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading secret ID: %w", err)
|
||||
}
|
||||
secretIDValue = s
|
||||
case a.secretIDEnv != "":
|
||||
s := os.Getenv(a.secretIDEnv)
|
||||
if s == "" {
|
||||
return nil, fmt.Errorf("secret ID was specified with an environment variable %q with an empty value", a.secretIDEnv)
|
||||
}
|
||||
secretIDValue = s
|
||||
default:
|
||||
secretIDValue = a.secretID
|
||||
}
|
||||
|
||||
// if the caller indicated that the value was actually a wrapping token, unwrap it first
|
||||
if a.unwrap {
|
||||
unwrappedToken, err := client.Logical().UnwrapWithContext(ctx, secretIDValue)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unwrap response wrapping token: %w", err)
|
||||
}
|
||||
loginData["secret_id"] = unwrappedToken.Data["secret_id"]
|
||||
} else {
|
||||
loginData["secret_id"] = secretIDValue
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("auth/%s/login", a.mountPath)
|
||||
resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to log in with app role auth: %w", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func WithMountPath(mountPath string) LoginOption {
|
||||
return func(a *AppRoleAuth) error {
|
||||
a.mountPath = mountPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithWrappingToken() LoginOption {
|
||||
return func(a *AppRoleAuth) error {
|
||||
a.unwrap = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AppRoleAuth) readSecretIDFromFile() (string, error) {
|
||||
secretIDFile, err := os.Open(a.secretIDFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to open file containing secret ID: %w", err)
|
||||
}
|
||||
defer secretIDFile.Close()
|
||||
|
||||
limitedReader := io.LimitReader(secretIDFile, 1000)
|
||||
secretIDBytes, err := io.ReadAll(limitedReader)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to read secret ID: %w", err)
|
||||
}
|
||||
|
||||
secretIDValue := strings.TrimSuffix(string(secretIDBytes), "\n")
|
||||
|
||||
return secretIDValue, nil
|
||||
}
|
||||
|
||||
func (secretID *SecretID) validate() error {
|
||||
if secretID.FromFile == "" && secretID.FromEnv == "" && secretID.FromString == "" {
|
||||
return fmt.Errorf("secret ID for AppRole must be provided with a source file, environment variable, or plaintext string")
|
||||
}
|
||||
|
||||
if secretID.FromFile != "" {
|
||||
if secretID.FromEnv != "" || secretID.FromString != "" {
|
||||
return fmt.Errorf("only one source for the secret ID should be specified")
|
||||
}
|
||||
}
|
||||
|
||||
if secretID.FromEnv != "" {
|
||||
if secretID.FromFile != "" || secretID.FromString != "" {
|
||||
return fmt.Errorf("only one source for the secret ID should be specified")
|
||||
}
|
||||
}
|
||||
|
||||
if secretID.FromString != "" {
|
||||
if secretID.FromFile != "" || secretID.FromEnv != "" {
|
||||
return fmt.Errorf("only one source for the secret ID should be specified")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
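A sketch of how a client would use this new AppRole helper; the role ID and secret-ID file path are placeholders, and WithWrappingToken is used because the file is assumed to hold a response-wrapping token rather than a plaintext secret ID:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	auth "github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The file is assumed to hold a response-wrapping token delivered by a
	// trusted orchestrator; the path and role ID below are hypothetical.
	secretID := &auth.SecretID{FromFile: "/etc/vault/secret-id"}
	appRoleAuth, err := auth.NewAppRoleAuth("my-role-id", secretID, auth.WithWrappingToken())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Auth().Login(context.Background(), appRoleAuth)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client token accessor:", secret.Auth.Accessor)
}
```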
61
vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE
generated
vendored
Normal file
61
vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
|
||||
“Business Source License” is a trademark of MariaDB Corporation Ab.
|
||||
|
||||
Parameters
|
||||
|
||||
Licensor: HashiCorp, Inc.
|
||||
Licensed Work: Vault 1.15.0. The Licensed Work is (c) 2023 HashiCorp, Inc.
|
||||
Additional Use Grant: You may make production use of the Licensed Work,
|
||||
provided such use does not include offering the Licensed Work
|
||||
to third parties on a hosted or embedded basis which is
|
||||
competitive with HashiCorp's products.
|
||||
Change Date: Four years from the date the Licensed Work is published.
|
||||
Change License: MPL 2.0
|
||||
|
||||
For information about alternative licensing arrangements for the Licensed Work,
|
||||
please contact licensing@hashicorp.com.
|
||||
|
||||
Notice
|
||||
|
||||
Business Source License 1.1
|
||||
|
||||
Terms
|
||||
|
||||
The Licensor hereby grants you the right to copy, modify, create derivative
|
||||
works, redistribute, and make non-production use of the Licensed Work. The
|
||||
Licensor may make an Additional Use Grant, above, permitting limited production use.
|
||||
|
||||
Effective on the Change Date, or the fourth anniversary of the first publicly
|
||||
available distribution of a specific version of the Licensed Work under this
|
||||
License, whichever comes first, the Licensor hereby grants you rights under
|
||||
the terms of the Change License, and the rights granted in the paragraph
|
||||
above terminate.
|
||||
|
||||
If your use of the Licensed Work does not comply with the requirements
|
||||
currently in effect as described in this License, you must purchase a
|
||||
commercial license from the Licensor, its affiliated entities, or authorized
|
||||
resellers, or you must refrain from using the Licensed Work.
|
||||
|
||||
All copies of the original and modified Licensed Work, and derivative works
|
||||
of the Licensed Work, are subject to this License. This License applies
|
||||
separately for each version of the Licensed Work and the Change Date may vary
|
||||
for each version of the Licensed Work released by Licensor.
|
||||
|
||||
You must conspicuously display this License on each original or modified copy
|
||||
of the Licensed Work. If you receive the Licensed Work in original or
|
||||
modified form from a third party, the terms and conditions set forth in this
|
||||
License apply to your use of that work.
|
||||
|
||||
Any use of the Licensed Work in violation of this License will automatically
|
||||
terminate your rights under this License for the current and all other
|
||||
versions of the Licensed Work.
|
||||
|
||||
This License does not grant you any right in any trademark or logo of
|
||||
Licensor or its affiliates (provided that you may use a trademark or logo of
|
||||
Licensor as expressly required by this License).
|
||||
|
||||
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
|
||||
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
|
||||
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
|
||||
TITLE.
|
136
vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go
generated
vendored
Normal file
136
vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package kubernetes

import (
	"context"
	"fmt"
	"os"

	"github.com/hashicorp/vault/api"
)

type KubernetesAuth struct {
	roleName            string
	mountPath           string
	serviceAccountToken string
}

var _ api.AuthMethod = (*KubernetesAuth)(nil)

type LoginOption func(a *KubernetesAuth) error

const (
	defaultMountPath               = "kubernetes"
	defaultServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)

// NewKubernetesAuth creates a KubernetesAuth struct which can be passed to
// the client.Auth().Login method to authenticate to Vault. The roleName
// parameter should be the name of the role in Vault that was created with
// this app's Kubernetes service account bound to it.
//
// The Kubernetes service account token JWT is retrieved from
// /var/run/secrets/kubernetes.io/serviceaccount/token by default. To change this
// path, pass the WithServiceAccountTokenPath option. To instead pass the
// JWT directly as a string, or to read the value from an environment
// variable, use WithServiceAccountToken and WithServiceAccountTokenEnv respectively.
//
// Supported options: WithMountPath, WithServiceAccountTokenPath, WithServiceAccountTokenEnv, WithServiceAccountToken
func NewKubernetesAuth(roleName string, opts ...LoginOption) (*KubernetesAuth, error) {
	if roleName == "" {
		return nil, fmt.Errorf("no role name was provided")
	}

	a := &KubernetesAuth{
		roleName:  roleName,
		mountPath: defaultMountPath,
	}

	// Loop through each option
	for _, opt := range opts {
		// Call the option giving the instantiated
		// *KubernetesAuth as the argument
		err := opt(a)
		if err != nil {
			return nil, fmt.Errorf("error with login option: %w", err)
		}
	}

	if a.serviceAccountToken == "" {
		token, err := readTokenFromFile(defaultServiceAccountTokenPath)
		if err != nil {
			return nil, fmt.Errorf("error reading service account token from default location: %w", err)
		}
		a.serviceAccountToken = token
	}

	// return the modified auth struct instance
	return a, nil
}

func (a *KubernetesAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
	if ctx == nil {
		ctx = context.Background()
	}

	loginData := map[string]interface{}{
		"jwt":  a.serviceAccountToken,
		"role": a.roleName,
	}

	path := fmt.Sprintf("auth/%s/login", a.mountPath)
	resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
	if err != nil {
		return nil, fmt.Errorf("unable to log in with Kubernetes auth: %w", err)
	}
	return resp, nil
}

func WithMountPath(mountPath string) LoginOption {
	return func(a *KubernetesAuth) error {
		a.mountPath = mountPath
		return nil
	}
}

// WithServiceAccountTokenPath allows you to specify a different path to
// where your application's Kubernetes service account token is mounted,
// instead of the default of /var/run/secrets/kubernetes.io/serviceaccount/token
func WithServiceAccountTokenPath(pathToToken string) LoginOption {
	return func(a *KubernetesAuth) error {
		token, err := readTokenFromFile(pathToToken)
		if err != nil {
			return fmt.Errorf("unable to read service account token from file: %w", err)
		}
		a.serviceAccountToken = token
		return nil
	}
}

func WithServiceAccountToken(jwt string) LoginOption {
	return func(a *KubernetesAuth) error {
		a.serviceAccountToken = jwt
		return nil
	}
}

func WithServiceAccountTokenEnv(envVar string) LoginOption {
	return func(a *KubernetesAuth) error {
		token := os.Getenv(envVar)
		if token == "" {
			return fmt.Errorf("service account token was specified with an environment variable with an empty value")
		}
		a.serviceAccountToken = token
		return nil
	}
}

func readTokenFromFile(filepath string) (string, error) {
	jwt, err := os.ReadFile(filepath)
	if err != nil {
		return "", fmt.Errorf("unable to read file containing service account token: %w", err)
	}
	return string(jwt), nil
}
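A sketch of logging in with the new Kubernetes helper from inside a pod; the role name is a placeholder, and the service account JWT is read from the default in-cluster path unless an option overrides it:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
	auth "github.com/hashicorp/vault/api/auth/kubernetes"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "csi-role" is a placeholder for the Vault role bound to the pod's
	// service account; the JWT is read from the default in-cluster path.
	k8sAuth, err := auth.NewKubernetesAuth("csi-role", auth.WithMountPath("kubernetes"))
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Auth().Login(context.Background(), k8sAuth)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authenticated, lease duration:", secret.Auth.LeaseDuration)
}
```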
445
vendor/github.com/hashicorp/vault/command/agent/auth/auth.go
generated
vendored
445
vendor/github.com/hashicorp/vault/command/agent/auth/auth.go
generated
vendored
@ -1,445 +0,0 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMinBackoff = 1 * time.Second
|
||||
defaultMaxBackoff = 5 * time.Minute
|
||||
)
|
||||
|
||||
// AuthMethod is the interface that auto-auth methods implement for the agent
|
||||
// to use.
|
||||
type AuthMethod interface {
|
||||
// Authenticate returns a mount path, header, request body, and error.
|
||||
// The header may be nil if no special header is needed.
|
||||
Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error)
|
||||
NewCreds() chan struct{}
|
||||
CredSuccess()
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// AuthMethodWithClient is an extended interface that can return an API client
|
||||
// for use during the authentication call.
|
||||
type AuthMethodWithClient interface {
|
||||
AuthMethod
|
||||
AuthClient(client *api.Client) (*api.Client, error)
|
||||
}
|
||||
|
||||
type AuthConfig struct {
|
||||
Logger hclog.Logger
|
||||
MountPath string
|
||||
WrapTTL time.Duration
|
||||
Config map[string]interface{}
|
||||
}
|
||||
|
||||
// AuthHandler is responsible for keeping a token alive and renewed and passing
|
||||
// new tokens to the sink server
|
||||
type AuthHandler struct {
|
||||
OutputCh chan string
|
||||
TemplateTokenCh chan string
|
||||
token string
|
||||
logger hclog.Logger
|
||||
client *api.Client
|
||||
random *rand.Rand
|
||||
wrapTTL time.Duration
|
||||
maxBackoff time.Duration
|
||||
minBackoff time.Duration
|
||||
enableReauthOnNewCredentials bool
|
||||
enableTemplateTokenCh bool
|
||||
exitOnError bool
|
||||
}
|
||||
|
||||
type AuthHandlerConfig struct {
|
||||
Logger hclog.Logger
|
||||
Client *api.Client
|
||||
WrapTTL time.Duration
|
||||
MaxBackoff time.Duration
|
||||
MinBackoff time.Duration
|
||||
Token string
|
||||
EnableReauthOnNewCredentials bool
|
||||
EnableTemplateTokenCh bool
|
||||
ExitOnError bool
|
||||
}
|
||||
|
||||
func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
|
||||
ah := &AuthHandler{
|
||||
// This is buffered so that if we try to output after the sink server
|
||||
// has been shut down, during agent shutdown, we won't block
|
||||
OutputCh: make(chan string, 1),
|
||||
TemplateTokenCh: make(chan string, 1),
|
||||
token: conf.Token,
|
||||
logger: conf.Logger,
|
||||
client: conf.Client,
|
||||
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
wrapTTL: conf.WrapTTL,
|
||||
minBackoff: conf.MinBackoff,
|
||||
maxBackoff: conf.MaxBackoff,
|
||||
enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials,
|
||||
enableTemplateTokenCh: conf.EnableTemplateTokenCh,
|
||||
exitOnError: conf.ExitOnError,
|
||||
}
|
||||
|
||||
return ah
|
||||
}
|
||||
|
||||
func backoff(ctx context.Context, backoff *agentBackoff) bool {
|
||||
if backoff.exitOnErr {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(backoff.current):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
// Increase exponential backoff for the next time if we don't
|
||||
// successfully auth/renew/etc.
|
||||
backoff.next()
|
||||
return true
|
||||
}
|
||||
|
||||
func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
if am == nil {
|
||||
return errors.New("auth handler: nil auth method")
|
||||
}
|
||||
|
||||
if ah.minBackoff <= 0 {
|
||||
ah.minBackoff = defaultMinBackoff
|
||||
}
|
||||
|
||||
backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError)
|
||||
|
||||
if backoffCfg.min >= backoffCfg.max {
|
||||
return errors.New("auth handler: min_backoff cannot be greater than max_backoff")
|
||||
}
|
||||
|
||||
ah.logger.Info("starting auth handler")
|
||||
defer func() {
|
||||
am.Shutdown()
|
||||
close(ah.OutputCh)
|
||||
close(ah.TemplateTokenCh)
|
||||
ah.logger.Info("auth handler stopped")
|
||||
}()
|
||||
|
||||
credCh := am.NewCreds()
|
||||
if !ah.enableReauthOnNewCredentials {
|
||||
realCredCh := credCh
|
||||
credCh = nil
|
||||
if realCredCh != nil {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-realCredCh:
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
if credCh == nil {
|
||||
credCh = make(chan struct{})
|
||||
}
|
||||
|
||||
var watcher *api.LifetimeWatcher
|
||||
first := true
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
|
||||
default:
|
||||
}
|
||||
|
||||
var clientToUse *api.Client
|
||||
var err error
|
||||
var path string
|
||||
var data map[string]interface{}
|
||||
var header http.Header
|
||||
|
||||
switch am.(type) {
|
||||
case AuthMethodWithClient:
|
||||
clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client)
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
default:
|
||||
clientToUse = ah.client
|
||||
}
|
||||
|
||||
// Disable retry on the client to ensure our backoffOrQuit function is
|
||||
// the only source of retry/backoff.
|
||||
clientToUse.SetMaxRetries(0)
|
||||
|
||||
var secret *api.Secret = new(api.Secret)
|
||||
if first && ah.token != "" {
|
||||
ah.logger.Debug("using preloaded token")
|
||||
|
||||
first = false
|
||||
ah.logger.Debug("lookup-self with preloaded token")
|
||||
clientToUse.SetToken(ah.token)
|
||||
|
||||
secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx)
|
||||
if err != nil {
|
||||
ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
duration, _ := secret.Data["ttl"].(json.Number).Int64()
|
||||
secret.Auth = &api.SecretAuth{
|
||||
ClientToken: secret.Data["id"].(string),
|
||||
LeaseDuration: int(duration),
|
||||
Renewable: secret.Data["renewable"].(bool),
|
||||
}
|
||||
} else {
|
||||
ah.logger.Info("authenticating")
|
||||
|
||||
path, header, data, err = am.Authenticate(ctx, ah.client)
|
||||
if err != nil {
|
||||
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ah.wrapTTL > 0 {
|
||||
wrapClient, err := clientToUse.Clone()
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
wrapClient.SetWrappingLookupFunc(func(string, string) string {
|
||||
return ah.wrapTTL.String()
|
||||
})
|
||||
clientToUse = wrapClient
|
||||
}
|
||||
for key, values := range header {
|
||||
for _, value := range values {
|
||||
clientToUse.AddHeader(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// This should only happen if there's no preloaded token (regular auto-auth login)
|
||||
// or if a preloaded token has expired and is now switching to auto-auth.
|
||||
if secret.Auth == nil {
|
||||
secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data)
|
||||
// Check errors/sanity
|
||||
if err != nil {
|
||||
ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case ah.wrapTTL > 0:
|
||||
if secret.WrapInfo == nil {
|
||||
ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if secret.WrapInfo.Token == "" {
|
||||
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo)
|
||||
if err != nil {
|
||||
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing")
|
||||
ah.OutputCh <- string(wrappedResp)
|
||||
if ah.enableTemplateTokenCh {
|
||||
ah.TemplateTokenCh <- string(wrappedResp)
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoffCfg.reset()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ah.logger.Info("shutdown triggered")
|
||||
continue
|
||||
|
||||
case <-credCh:
|
||||
ah.logger.Info("auth method found new credentials, re-authenticating")
|
||||
continue
|
||||
}
|
||||
|
||||
default:
|
||||
if secret == nil || secret.Auth == nil {
|
||||
ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if secret.Auth.ClientToken == "" {
|
||||
ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
ah.logger.Info("authentication successful, sending token to sinks")
|
||||
ah.OutputCh <- secret.Auth.ClientToken
|
||||
if ah.enableTemplateTokenCh {
|
||||
ah.TemplateTokenCh <- secret.Auth.ClientToken
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoffCfg.reset()
|
||||
}
|
||||
|
||||
if watcher != nil {
|
||||
watcher.Stop()
|
||||
}
|
||||
|
||||
watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{
|
||||
Secret: secret,
|
||||
})
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Start the renewal process
|
||||
ah.logger.Info("starting renewal process")
|
||||
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
|
||||
go watcher.Renew()
|
||||
|
||||
LifetimeWatcherLoop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ah.logger.Info("shutdown triggered, stopping lifetime watcher")
|
||||
watcher.Stop()
|
||||
break LifetimeWatcherLoop
|
||||
|
||||
case err := <-watcher.DoneCh():
|
||||
ah.logger.Info("lifetime watcher done channel triggered")
|
||||
if err != nil {
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
ah.logger.Error("error renewing token", "error", err)
|
||||
}
|
||||
break LifetimeWatcherLoop
|
||||
|
||||
case <-watcher.RenewCh():
|
||||
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
|
||||
ah.logger.Info("renewed auth token")
|
||||
|
||||
case <-credCh:
|
||||
ah.logger.Info("auth method found new credentials, re-authenticating")
|
||||
break LifetimeWatcherLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
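For reference, a minimal sketch of the lifetime-watcher pattern used in the handler above, assuming a pre-built `api.Client` and an auth `*api.Secret` obtained from an earlier login; the package and function names are illustrative, not part of the removed file.

```go
package example

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// keepRenewed mirrors the watcher loop above: hand the auth secret to a
// LifetimeWatcher, start renewing, and react to its done/renew channels.
func keepRenewed(client *api.Client, secret *api.Secret) error {
	watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{
		Secret: secret,
	})
	if err != nil {
		return err
	}
	go watcher.Renew()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal ended (lease expired, revoked, or not renewable);
			// the caller should re-authenticate at this point.
			return err
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed auth token at %s", renewal.RenewedAt)
		}
	}
}
```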
||||
|
||||
// agentBackoff tracks exponential backoff state.
|
||||
type agentBackoff struct {
|
||||
min time.Duration
|
||||
max time.Duration
|
||||
current time.Duration
|
||||
exitOnErr bool
|
||||
}
|
||||
|
||||
func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff {
|
||||
if max <= 0 {
|
||||
max = defaultMaxBackoff
|
||||
}
|
||||
|
||||
if min <= 0 {
|
||||
min = defaultMinBackoff
|
||||
}
|
||||
|
||||
return &agentBackoff{
|
||||
current: min,
|
||||
max: max,
|
||||
min: min,
|
||||
exitOnErr: exitErr,
|
||||
}
|
||||
}
|
||||
|
||||
// next determines the next backoff duration that is roughly twice
|
||||
// the current value, capped to a max value, with a measure of randomness.
|
||||
func (b *agentBackoff) next() {
|
||||
maxBackoff := 2 * b.current
|
||||
|
||||
if maxBackoff > b.max {
|
||||
maxBackoff = b.max
|
||||
}
|
||||
|
||||
// Trim a random amount (0-25%) off the doubled duration
|
||||
trim := rand.Int63n(int64(maxBackoff) / 4)
|
||||
b.current = maxBackoff - time.Duration(trim)
|
||||
}
|
||||
|
||||
func (b *agentBackoff) reset() {
|
||||
b.current = b.min
|
||||
}
|
||||
|
||||
func (b agentBackoff) String() string {
|
||||
return b.current.Truncate(10 * time.Millisecond).String()
|
||||
}
|
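As a worked illustration of the doubling-with-jitter rule implemented by agentBackoff above, a self-contained sketch; the helper name and the 1s/5m bounds are arbitrary.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextBackoff applies the same rule as agentBackoff.next above: double the
// current value, cap it at max, then trim a random 0-25% off the result.
func nextBackoff(current, max time.Duration) time.Duration {
	doubled := 2 * current
	if doubled > max {
		doubled = max
	}
	trim := time.Duration(rand.Int63n(int64(doubled) / 4))
	return doubled - trim
}

func main() {
	// Illustrative progression from a 1s minimum toward a 5m cap.
	d := time.Second
	for i := 0; i < 5; i++ {
		d = nextBackoff(d, 5*time.Minute)
		fmt.Println(d.Truncate(10 * time.Millisecond))
	}
}
```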
129
vendor/github.com/hashicorp/vault/command/agent/auth/kubernetes/kubernetes.go
generated
vendored
129
vendor/github.com/hashicorp/vault/command/agent/auth/kubernetes/kubernetes.go
generated
vendored
@ -1,129 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/command/agent/auth"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceAccountFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
|
||||
)
|
||||
|
||||
type kubernetesMethod struct {
|
||||
logger hclog.Logger
|
||||
mountPath string
|
||||
|
||||
role string
|
||||
|
||||
// tokenPath is an optional path to a projected service account token inside
|
||||
// the pod, for use instead of the default service account token.
|
||||
tokenPath string
|
||||
|
||||
// jwtData is a ReadCloser used to inject mock JWT data in tests.
|
||||
jwtData io.ReadCloser
|
||||
}
|
||||
|
||||
// NewKubernetesAuthMethod reads the user configuration and returns a configured
|
||||
// AuthMethod
|
||||
func NewKubernetesAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
|
||||
if conf == nil {
|
||||
return nil, errors.New("empty config")
|
||||
}
|
||||
if conf.Config == nil {
|
||||
return nil, errors.New("empty config data")
|
||||
}
|
||||
|
||||
k := &kubernetesMethod{
|
||||
logger: conf.Logger,
|
||||
mountPath: conf.MountPath,
|
||||
}
|
||||
|
||||
roleRaw, ok := conf.Config["role"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing 'role' value")
|
||||
}
|
||||
k.role, ok = roleRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'role' config value to string")
|
||||
}
|
||||
|
||||
tokenPathRaw, ok := conf.Config["token_path"]
|
||||
if ok {
|
||||
k.tokenPath, ok = tokenPathRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'token_path' config value to string")
|
||||
}
|
||||
}
|
||||
|
||||
if k.role == "" {
|
||||
return nil, errors.New("'role' value is empty")
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
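A hedged sketch of driving this constructor directly, mirroring the fields it reads (Logger, MountPath, and the "role"/"token_path" config keys); the mount path, role, and token path values are placeholders.

```go
package main

import (
	"fmt"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/command/agent/auth"
	"github.com/hashicorp/vault/command/agent/auth/kubernetes"
)

func main() {
	// Mirrors the fields NewKubernetesAuthMethod reads above: Logger, MountPath,
	// and the "role"/"token_path" keys of Config. Values are placeholders.
	conf := &auth.AuthConfig{
		Logger:    hclog.Default().Named("auth.kubernetes"),
		MountPath: "auth/kubernetes",
		Config: map[string]interface{}{
			"role":       "my-role",
			"token_path": "/var/run/secrets/tokens/vault-token",
		},
	}

	method, err := kubernetes.NewKubernetesAuthMethod(conf)
	if err != nil {
		fmt.Println("config error:", err)
		return
	}
	_ = method // hand the method to the auth handler's Run loop
}
```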
||||
|
||||
func (k *kubernetesMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) {
|
||||
k.logger.Trace("beginning authentication")
|
||||
|
||||
jwtString, err := k.readJWT()
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/login", k.mountPath), nil, map[string]interface{}{
|
||||
"role": k.role,
|
||||
"jwt": jwtString,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (k *kubernetesMethod) NewCreds() chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kubernetesMethod) CredSuccess() {
|
||||
}
|
||||
|
||||
func (k *kubernetesMethod) Shutdown() {
|
||||
}
|
||||
|
||||
// readJWT reads the JWT data for the Agent to submit to Vault. The default is
|
||||
// to read the JWT from the default service account location, defined by the
|
||||
// constant serviceAccountFile. In normal use k.jwtData is nil at invocation and
|
||||
// the method falls back to reading the token path with os.Open, opening a file
|
||||
// from either the default location or from the token_path specified in
|
||||
// configuration.
|
||||
func (k *kubernetesMethod) readJWT() (string, error) {
|
||||
// load configured token path if set, default to serviceAccountFile
|
||||
tokenFilePath := serviceAccountFile
|
||||
if k.tokenPath != "" {
|
||||
tokenFilePath = k.tokenPath
|
||||
}
|
||||
|
||||
data := k.jwtData
|
||||
// k.jwtData should only be non-nil in tests
|
||||
if data == nil {
|
||||
f, err := os.Open(tokenFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
data = f
|
||||
}
|
||||
defer data.Close()
|
||||
|
||||
contentBytes, err := ioutil.ReadAll(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(contentBytes)), nil
|
||||
}
|
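To connect this file back to the handler earlier in the diff, a minimal sketch of one login round: apply the headers and write the data returned by Authenticate to the returned path; the package and helper names are illustrative.

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/agent/auth"
)

// loginOnce performs a single login with any AuthMethod the way the auth
// handler above does: apply the returned headers, then write the returned
// data to the returned path.
func loginOnce(ctx context.Context, client *api.Client, am auth.AuthMethod) (*api.Secret, error) {
	path, header, data, err := am.Authenticate(ctx, client)
	if err != nil {
		return nil, fmt.Errorf("authenticate: %w", err)
	}
	for key, values := range header {
		for _, value := range values {
			client.AddHeader(key, value)
		}
	}
	return client.Logical().WriteWithContext(ctx, path, data)
}
```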
365
vendor/github.com/hashicorp/vault/sdk/LICENSE
generated
vendored
365
vendor/github.com/hashicorp/vault/sdk/LICENSE
generated
vendored
@ -1,365 +0,0 @@
|
||||
Copyright (c) 2015 HashiCorp, Inc.
|
||||
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
222
vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go
generated
vendored
222
vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go
generated
vendored
@ -1,222 +0,0 @@
|
||||
package compressutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/lzw"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
const (
|
||||
// Byte values used as canary prefixes for the compressed information,
|
||||
// which are used to distinguish whether a JSON input is compressed or not.
|
||||
// The value of these constants should not be the first character of any
|
||||
// valid JSON string.
|
||||
|
||||
CompressionTypeGzip = "gzip"
|
||||
CompressionCanaryGzip byte = 'G'
|
||||
|
||||
CompressionTypeLZW = "lzw"
|
||||
CompressionCanaryLZW byte = 'L'
|
||||
|
||||
CompressionTypeSnappy = "snappy"
|
||||
CompressionCanarySnappy byte = 'S'
|
||||
|
||||
CompressionTypeLZ4 = "lz4"
|
||||
CompressionCanaryLZ4 byte = '4'
|
||||
)
|
||||
|
||||
// CompressUtilReadCloser embeds a reader (such as the snappy reader) which implements the io.Reader
|
||||
// interface. The decompress procedure in this utility expects an
|
||||
// io.ReadCloser. This type implements the io.Closer interface to retain the
|
||||
// generic way of decompression.
|
||||
type CompressUtilReadCloser struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// Close is a noop method implemented only to satisfy the io.Closer interface
|
||||
func (c *CompressUtilReadCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CompressionConfig is used to select a compression type to be performed by
|
||||
// Compress and Decompress utilities.
|
||||
// Supported types are:
|
||||
// * CompressionTypeLZW
|
||||
// * CompressionTypeGzip
|
||||
// * CompressionTypeSnappy
|
||||
// * CompressionTypeLZ4
|
||||
//
|
||||
// When using CompressionTypeGzip, the compression levels can also be chosen:
|
||||
// * gzip.DefaultCompression
|
||||
// * gzip.BestSpeed
|
||||
// * gzip.BestCompression
|
||||
type CompressionConfig struct {
|
||||
// Type of the compression algorithm to be used
|
||||
Type string
|
||||
|
||||
// When using Gzip format, the compression level to employ
|
||||
GzipCompressionLevel int
|
||||
}
|
||||
|
||||
// Compress places the canary byte in a buffer and uses the same buffer to fill
|
||||
// in the compressed information of the given input. The configuration supports
|
||||
// the LZW, Gzip, Snappy, and LZ4 compression types. When using the Gzip format,
|
||||
// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will
|
||||
// be assumed.
|
||||
func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
var writer io.WriteCloser
|
||||
var err error
|
||||
|
||||
if config == nil {
|
||||
return nil, fmt.Errorf("config is nil")
|
||||
}
|
||||
|
||||
// Write the canary into the buffer and create writer to compress the
|
||||
// input data based on the configured type
|
||||
switch config.Type {
|
||||
case CompressionTypeLZW:
|
||||
buf.Write([]byte{CompressionCanaryLZW})
|
||||
writer = lzw.NewWriter(&buf, lzw.LSB, 8)
|
||||
|
||||
case CompressionTypeGzip:
|
||||
buf.Write([]byte{CompressionCanaryGzip})
|
||||
|
||||
switch {
|
||||
case config.GzipCompressionLevel == gzip.BestCompression,
|
||||
config.GzipCompressionLevel == gzip.BestSpeed,
|
||||
config.GzipCompressionLevel == gzip.DefaultCompression:
|
||||
// These are valid compression levels
|
||||
default:
|
||||
// If compression level is set to NoCompression or to
|
||||
// any invalid value, fall back to DefaultCompression
|
||||
config.GzipCompressionLevel = gzip.DefaultCompression
|
||||
}
|
||||
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
|
||||
|
||||
case CompressionTypeSnappy:
|
||||
buf.Write([]byte{CompressionCanarySnappy})
|
||||
writer = snappy.NewBufferedWriter(&buf)
|
||||
|
||||
case CompressionTypeLZ4:
|
||||
buf.Write([]byte{CompressionCanaryLZ4})
|
||||
writer = lz4.NewWriter(&buf)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported compression type")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
|
||||
}
|
||||
|
||||
if writer == nil {
|
||||
return nil, fmt.Errorf("failed to create a compression writer")
|
||||
}
|
||||
|
||||
// Compress the input and place it in the same buffer containing the
|
||||
// canary byte.
|
||||
if _, err = writer.Write(data); err != nil {
|
||||
return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err)
|
||||
}
|
||||
|
||||
// Close the io.WriteCloser
|
||||
if err = writer.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the compressed bytes with canary byte at the start
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Decompress checks if the first byte in the input matches the canary byte.
|
||||
// If the first byte is a canary byte, then the input past the canary byte
|
||||
// will be decompressed using the method specified in the given configuration.
|
||||
// If the first byte isn't a canary byte, then the utility returns a boolean
|
||||
// value indicating that the input was not compressed.
|
||||
func Decompress(data []byte) ([]byte, bool, error) {
|
||||
bytes, _, notCompressed, err := DecompressWithCanary(data)
|
||||
return bytes, notCompressed, err
|
||||
}
|
||||
|
||||
// DecompressWithCanary checks if the first byte in the input matches the canary byte.
|
||||
// If the first byte is a canary byte, then the input past the canary byte
|
||||
// will be decompressed using the method specified in the given configuration. The type of compression used is also
|
||||
// returned. If the first byte isn't a canary byte, then the utility returns a boolean
|
||||
// value indicating that the input was not compressed.
|
||||
func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
|
||||
var err error
|
||||
var reader io.ReadCloser
|
||||
var compressionType string
|
||||
if data == nil || len(data) == 0 {
|
||||
return nil, "", false, fmt.Errorf("'data' being decompressed is empty")
|
||||
}
|
||||
|
||||
canary := data[0]
|
||||
cData := data[1:]
|
||||
|
||||
switch canary {
|
||||
// If the first byte matches the canary byte, remove the canary
|
||||
// byte and try to decompress the data that is after the canary.
|
||||
case CompressionCanaryGzip:
|
||||
if len(data) < 2 {
|
||||
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
reader, err = gzip.NewReader(bytes.NewReader(cData))
|
||||
compressionType = CompressionTypeGzip
|
||||
|
||||
case CompressionCanaryLZW:
|
||||
if len(data) < 2 {
|
||||
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
|
||||
compressionType = CompressionTypeLZW
|
||||
|
||||
case CompressionCanarySnappy:
|
||||
if len(data) < 2 {
|
||||
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
reader = &CompressUtilReadCloser{
|
||||
Reader: snappy.NewReader(bytes.NewReader(cData)),
|
||||
}
|
||||
compressionType = CompressionTypeSnappy
|
||||
|
||||
case CompressionCanaryLZ4:
|
||||
if len(data) < 2 {
|
||||
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
|
||||
}
|
||||
reader = &CompressUtilReadCloser{
|
||||
Reader: lz4.NewReader(bytes.NewReader(cData)),
|
||||
}
|
||||
compressionType = CompressionTypeLZ4
|
||||
|
||||
default:
|
||||
// If the first byte doesn't match the canary byte, it means
|
||||
// that the content was not compressed at all. Indicate the
|
||||
// caller that the input was not compressed.
|
||||
return nil, "", true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
|
||||
}
|
||||
if reader == nil {
|
||||
return nil, "", false, fmt.Errorf("failed to create a compression reader")
|
||||
}
|
||||
|
||||
// Close the io.ReadCloser
|
||||
defer reader.Close()
|
||||
|
||||
// Read all the compressed data into a buffer
|
||||
var buf bytes.Buffer
|
||||
if _, err = io.Copy(&buf, reader); err != nil {
|
||||
return nil, "", false, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), compressionType, false, nil
|
||||
}
|
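A short round-trip sketch of the Compress/DecompressWithCanary pair above; the JSON payload and the choice of Snappy are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/compressutil"
)

func main() {
	input := []byte(`{"key":"value"}`)

	// Compress prefixes the output with the Snappy canary byte 'S'.
	compressed, err := compressutil.Compress(input, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeSnappy,
	})
	if err != nil {
		panic(err)
	}

	// DecompressWithCanary inspects that first byte to pick the decoder.
	plain, typ, notCompressed, err := compressutil.DecompressWithCanary(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain), typ, notCompressed) // {"key":"value"} snappy false
}
```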
100
vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go
generated
vendored
100
vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go
generated
vendored
@ -1,100 +0,0 @@
|
||||
package jsonutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/vault/sdk/helper/compressutil"
|
||||
)
|
||||
|
||||
// EncodeJSON encodes/marshals the given object into JSON
|
||||
func EncodeJSON(in interface{}) ([]byte, error) {
|
||||
if in == nil {
|
||||
return nil, fmt.Errorf("input for encoding is nil")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
if err := enc.Encode(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// EncodeJSONAndCompress encodes the given input into JSON and compresses the
|
||||
// encoded value (using Gzip format BestCompression level, by default). A
|
||||
// canary byte is placed at the beginning of the returned bytes for the logic
|
||||
// in decompression method to identify compressed input.
|
||||
func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
|
||||
if in == nil {
|
||||
return nil, fmt.Errorf("input for encoding is nil")
|
||||
}
|
||||
|
||||
// First JSON encode the given input
|
||||
encodedBytes, err := EncodeJSON(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
config = &compressutil.CompressionConfig{
|
||||
Type: compressutil.CompressionTypeGzip,
|
||||
GzipCompressionLevel: gzip.BestCompression,
|
||||
}
|
||||
}
|
||||
|
||||
return compressutil.Compress(encodedBytes, config)
|
||||
}
|
||||
|
||||
// DecodeJSON tries to decompress the given data. The call to decompress fails
|
||||
// if the content was not compressed in the first place, which is identified by
|
||||
// a canary byte before the compressed data. If the data is not compressed, it
|
||||
// is JSON decoded directly. Otherwise the decompressed data will be JSON
|
||||
// decoded.
|
||||
func DecodeJSON(data []byte, out interface{}) error {
|
||||
if data == nil || len(data) == 0 {
|
||||
return fmt.Errorf("'data' being decoded is nil")
|
||||
}
|
||||
if out == nil {
|
||||
return fmt.Errorf("output parameter 'out' is nil")
|
||||
}
|
||||
|
||||
// Decompress the data if it was compressed in the first place
|
||||
decompressedBytes, uncompressed, err := compressutil.Decompress(data)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
|
||||
}
|
||||
if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
|
||||
return fmt.Errorf("decompressed data being decoded is invalid")
|
||||
}
|
||||
|
||||
// If the input contained the compression canary, decode the
|
||||
// decompressed bytes; otherwise the compression utility reports it as
|
||||
// uncompressed and the original input is decoded as-is.
|
||||
if !uncompressed {
|
||||
data = decompressedBytes
|
||||
}
|
||||
|
||||
return DecodeJSONFromReader(bytes.NewReader(data), out)
|
||||
}
|
||||
|
||||
// DecodeJSONFromReader decodes/unmarshals JSON read from the given io.Reader into the desired object
|
||||
func DecodeJSONFromReader(r io.Reader, out interface{}) error {
|
||||
if r == nil {
|
||||
return fmt.Errorf("'io.Reader' being decoded is nil")
|
||||
}
|
||||
if out == nil {
|
||||
return fmt.Errorf("output parameter 'out' is nil")
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(r)
|
||||
|
||||
// While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
|
||||
dec.UseNumber()
|
||||
|
||||
// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
|
||||
return dec.Decode(out)
|
||||
}
|
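Finally, a brief sketch of the jsonutil helpers above, compressing on encode and transparently decompressing on decode; the struct and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/jsonutil"
)

type payload struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

func main() {
	in := payload{Name: "example", Count: 3}

	// A nil config defaults to Gzip at BestCompression, with the canary byte prepended.
	encoded, err := jsonutil.EncodeJSONAndCompress(in, nil)
	if err != nil {
		panic(err)
	}

	// DecodeJSON detects the canary, decompresses, then JSON-decodes.
	var out payload
	if err := jsonutil.DecodeJSON(encoded, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:example Count:3}
}
```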