Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-24 14:00:19 +00:00)
rebase: update vault to latest release
Even 1.9.9 is having security vulnerabilities: https://github.com/ceph/ceph-csi/actions/runs/5088482029/jobs/9144940410?pr=3859. Updating the vault to the latest release; all other updates are due to the dependency update by `go mod tidy`.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
parent 41a61efee4
commit aa4271a32a
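For context, a minimal sketch of how a bump like this is typically produced locally. This is an assumed workflow, not taken from the commit itself; the target versions are read off the diff below:

# raise the vault modules to the releases seen in the diff
go get github.com/hashicorp/vault@v1.13.2 github.com/hashicorp/vault/api@v1.9.1
# pull in the matching transitive updates; this rewrites the rest of go.mod and go.sum
go mod tidy
# confirm what the module graph now resolves to
go list -m all | grep hashicorp/vault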
go.mod (58 changed lines)
@@ -18,7 +18,7 @@ require (
github.com/google/uuid v1.3.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.9.0
github.com/hashicorp/vault/api v1.9.1
github.com/kubernetes-csi/csi-lib-utils v0.13.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
@@ -26,12 +26,12 @@ require (
github.com/onsi/gomega v1.27.1
github.com/pkg/xattr v0.4.9
github.com/prometheus/client_golang v1.14.0
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.2
golang.org/x/crypto v0.6.0
golang.org/x/net v0.7.0
golang.org/x/sys v0.5.0
golang.org/x/net v0.8.0
golang.org/x/sys v0.6.0
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.1
google.golang.org/protobuf v1.29.1
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v12.0.0+incompatible
@@ -50,6 +50,7 @@ require (
require (
github.com/ansel1/merry v1.6.2 // indirect
github.com/ansel1/merry/v2 v2.0.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go-v2 v1.17.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
@@ -60,12 +61,12 @@ require (
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/color v1.14.1 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/frankban/quicktest v1.13.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
@@ -85,25 +86,27 @@ require (
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.1.0 // indirect
github.com/hashicorp/go-hclog v1.5.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.1-vault-3 // indirect
github.com/hashicorp/vault v1.9.9 // indirect
github.com/hashicorp/vault/sdk v0.7.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/hashicorp/vault v1.13.2 // indirect
github.com/hashicorp/vault/sdk v0.8.1 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -117,7 +120,7 @@ require (
github.com/openshift/api v0.0.0-20210927171657-636513e97fda // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
@@ -125,25 +128,24 @@ require (
github.com/spf13/cobra v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel v1.11.2 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/otel/sdk v1.11.2 // indirect
go.opentelemetry.io/otel/trace v1.11.2 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/oauth2 v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.3.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.103.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -168,6 +170,12 @@ replace (
// Required for kubernetes 1.26
github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3

// workaround for https://github.com/open-telemetry/opentelemetry-go/issues/3548
go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.11.1
go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.11.1
go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.11.2

gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
//
// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
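Since the motivation for the bump is the vulnerability report linked in the commit message, a quick way to re-check the updated module graph locally is shown below. The commit does not say which scanner the linked CI job runs; govulncheck is just one common choice and is assumed here:

go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...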
go.sum (221 changed lines)
@@ -27,21 +27,21 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg=
cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA=
cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE=
cloud.google.com/go/kms v1.9.0 h1:b0votJQa/9DSsxgHwN33/tTLA7ZHVzfWhDCrfiXijSo=
cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@ -54,19 +54,19 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v61.4.0+incompatible h1:BF2Pm3aQWIa6q9KmxyF1JYKYXtVw67vtvu2Wd54NGuY=
|
||||
github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
@ -84,6 +84,7 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
|
||||
@ -94,7 +95,7 @@ github.com/IBM/keyprotect-go-client v0.9.2 h1:3fdmKVRl3gBWw6YJhPxLBJEHFbLhj/1v96
|
||||
github.com/IBM/keyprotect-go-client v0.9.2/go.mod h1:yr8h2noNgU8vcbs+vhqoXp3Lmv73PI0zAc6VMgFvWwM=
|
||||
github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
|
||||
github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
|
||||
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
|
||||
@ -104,6 +105,7 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 h1:NsReiLpErIPzRrnogAXYwSoU7txA977LjDGrbkewJbg=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
|
||||
@ -111,7 +113,6 @@ github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UA
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
|
||||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@ -119,8 +120,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.62.146 h1:zAH0YjWzonbKHvNkfbxqTmX51uHbkQYu+jJah2IAiCA=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/ansel1/merry v1.5.0/go.mod h1:wUy/yW0JX0ix9GYvUbciq+bi3jW/vlKPlbpI7qdZpOw=
|
||||
github.com/ansel1/merry v1.5.1/go.mod h1:wUy/yW0JX0ix9GYvUbciq+bi3jW/vlKPlbpI7qdZpOw=
|
||||
@ -141,17 +142,17 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
|
||||
github.com/armon/go-metrics v0.3.1/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
|
||||
github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a h1:AP/vsCIvJZ129pdm9Ek7bH7yutN3hByqsMoNrWAxRQc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
|
||||
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.44.205 h1:q23NJXgLPIuBMn4zaluWWz57HPP5z7Ut8ZtK1D3N9bs=
|
||||
@ -168,6 +169,7 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 h1:s49mSnsBZEXjfGBkRfmK+nPqzT7L
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3/go.mod h1:b+psTJn33Q4qGoDaM7ZiOVVG8uVjGI6HaZ8WBHdgDgU=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@ -183,6 +185,7 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
@ -212,6 +215,7 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
|
||||
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
|
||||
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14=
|
||||
github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:Zv7xtAh/T/tmfZlxpESaWWiWOdiJz2GfbBYxImuI6T4=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@ -261,11 +265,13 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
|
||||
github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
|
||||
github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
@ -313,8 +319,9 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ
|
||||
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
|
||||
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
@ -369,25 +376,34 @@ github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
|
||||
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro=
|
||||
github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
|
||||
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
|
||||
github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc=
|
||||
github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4=
|
||||
github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ=
|
||||
github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts=
|
||||
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
@ -399,7 +415,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
@ -507,11 +523,11 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
|
||||
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@ -550,6 +566,7 @@ github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPA
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/eventlogger v0.1.1 h1:zyCjxsy7KunFsMPZKU5PnwWEakSrp1zjj2vPFmrDaeo=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
@ -565,18 +582,27 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
|
||||
github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.1.0 h1:QsGcniKx5/LuX2eYoeL+Np3UKYPNaN7YKpTh29h8rbw=
|
||||
github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
|
||||
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTrQoGIPry2Ew=
|
||||
github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs=
|
||||
github.com/hashicorp/go-kms-wrapping v0.6.8 h1:Tu4X6xRFyV3i9SSthYVGnyNaof3VTxVo2tBQ7bdHiwE=
|
||||
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs=
|
||||
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
|
||||
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8=
|
||||
github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0QqYSUOIj4HOHCVPEFjLqjxyXV/AbA=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 h1:X27JWuPW6Gmi2l7NMm0pvnp7z7hhtns2TeIOQU93mqI=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 h1:16I8OqBEuxZIowwn3jiLvhlx+z+ia4dJc9stvz0yUBU=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo=
|
||||
github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M=
|
||||
github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V7g8=
|
||||
github.com/hashicorp/go-memdb v1.3.3 h1:oGfEWrFuxtIUF3W2q/Jzt6G85TrMk9ey6XfYLvVe1Wo=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
|
||||
@ -586,24 +612,25 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo=
|
||||
github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM=
|
||||
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
|
||||
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.5 h1:TkCWKqk1psjvUV7WktmZiRoZ1a9vw048AVnk/YbrzgY=
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA=
|
||||
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng=
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
|
||||
github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
|
||||
@ -617,30 +644,32 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-3 h1:V95v5KSTu6DB5huDSKiq4uAfILEuNigK/+qPET6H/Mg=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-3/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
|
||||
github.com/hashicorp/hcp-sdk-go v0.23.0 h1:3WarkQSK0VzxJaH6psHIGQagag3ujL+NjWagZZHpiZM=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=
|
||||
github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE=
|
||||
github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
|
||||
github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
|
||||
github.com/hashicorp/raft v1.3.3 h1:Xr6DSHC5cIM8kzxu+IgoT/+MeNeUNeWin3ie6nlSrMg=
|
||||
github.com/hashicorp/raft-autopilot v0.1.3 h1:Y+5jWKTFABJhCrpVwGpGjti2LzwQSzivoqd2wM6JWGw=
|
||||
github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw=
|
||||
github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
|
||||
github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4=
|
||||
@ -649,8 +678,8 @@ github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBu
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
|
||||
github.com/hashicorp/vault v1.4.2/go.mod h1:500fLOj7p92Ys4X265LizqF78MzmHJUf1jV1zNJt060=
|
||||
github.com/hashicorp/vault v1.9.9 h1:Qqd/djZUmRQAB2LpenkbfCGVqFgXF2WQfz/gyDTqsbg=
|
||||
github.com/hashicorp/vault v1.9.9/go.mod h1:RvLfHfZvWp605avBk55DFNvnGNxp73kk8YlDtb/rfQA=
|
||||
github.com/hashicorp/vault v1.13.2 h1:cBJwrKpm5WdM5DDymguyG8FyiXb0Va5skyhXpx5q65E=
|
||||
github.com/hashicorp/vault v1.13.2/go.mod h1:xSiU7Oxurw1/4EGjktpYw7u0/a2UxLu4n8kzmVCromA=
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5/go.mod h1:sQ+VNwPQlemgXHXikYH6onfH9gPwDZ1GUVRLz0ZvHx8=
|
||||
github.com/hashicorp/vault-plugin-auth-azure v0.5.6-0.20200422235613-1b5c70f9ef68/go.mod h1:RCVBsf8AJndh4c6iGZtvVZFui9SG0Bj9fnF0SodNIkw=
|
||||
github.com/hashicorp/vault-plugin-auth-centrify v0.5.5/go.mod h1:GfRoy7NHsuR/ogmZtbExdJXUwbfwcxPrS9xzkyy2J/c=
|
||||
@ -677,8 +706,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
|
||||
github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8=
|
||||
github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM=
|
||||
github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38=
|
||||
github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs=
|
||||
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
|
||||
@ -688,8 +717,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.7.0 h1:2pQRO40R1etpKkia5fb4kjrdYMx3BHklPxl1pxpxDHg=
|
||||
github.com/hashicorp/vault/sdk v0.7.0/go.mod h1:KyfArJkhooyba7gYCKSq8v66QdqJmnbAxtV/OX1+JTs=
|
||||
github.com/hashicorp/vault/sdk v0.8.1 h1:bdlhIpxBmJuOZ5Anumao1xeiLocR2eQrBRuJynZfTac=
|
||||
github.com/hashicorp/vault/sdk v0.8.1/go.mod h1:kEpyfUU2ECGWf6XohKVFzvJ97ybSnXvxsTsBkbeVcQg=
|
||||
github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
@ -700,8 +729,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
|
||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@ -749,13 +778,14 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
|
||||
github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo=
|
||||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
|
||||
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@ -775,7 +805,6 @@ github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 h1:cMM5AB37e9aRG
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0/go.mod h1:VQVLCPGDX5l6V5PezjlDXLa+SpCbWSVU7B16cFWVVeE=
|
||||
github.com/layeh/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg=
|
||||
github.com/libopenstorage/autopilot-api v0.6.1-0.20210128210103-5fbb67948648/go.mod h1:6JLrPbR3ZJQFbUY/+QJMl/aF00YdIrLf8/GWAplgvJs=
|
||||
github.com/libopenstorage/openstorage v8.0.0+incompatible/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
|
||||
github.com/libopenstorage/operator v0.0.0-20200725001727-48d03e197117/go.mod h1:Qh+VXOB6hj60VmlgsmY+R1w+dFuHK246UueM4SAqZG0=
|
||||
@ -783,6 +812,7 @@ github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a h1:A4GqCY7+
|
||||
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY=
|
||||
github.com/libopenstorage/stork v1.3.0-beta1.0.20200630005842-9255e7a98775/go.mod h1:qBSzYTJVHlOMg5RINNiHD1kBzlasnrc2uKLPZLgu1Qs=
|
||||
github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@ -794,6 +824,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@ -804,13 +835,15 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
|
||||
@ -827,7 +860,7 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
@ -872,7 +905,9 @@ github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWk
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/okta/okta-sdk-golang v1.0.1 h1:1DGm5+h2JvfdHz07yVVM7+LgUVSwxnk+6RoLUOB6CwI=
|
||||
github.com/okta/okta-sdk-golang v1.0.1/go.mod h1:8k//sN2mFTq8Ayo90DqGbcumCkSmYjF0+2zkIbZysec=
|
||||
github.com/okta/okta-sdk-golang/v2 v2.12.1 h1:U+smE7trkHSZO8Mval3Ow85dbxawO+pMAr692VZq9gM=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@ -901,16 +936,19 @@ github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mo
|
||||
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
|
||||
github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
|
||||
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
|
||||
github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
|
||||
github.com/oracle/oci-go-sdk v13.1.0+incompatible h1:inwbT0b/mMbnTfzYoW2xcU1cCMIlU6Fz973at5phRXM=
|
||||
github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU=
|
||||
github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM=
|
||||
github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
|
||||
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
|
||||
github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0=
|
||||
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
@ -925,14 +963,16 @@ github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
|
||||
github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
|
||||
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/portworx/dcos-secrets v0.0.0-20180616013705-8e8ec3f66611/go.mod h1:4hklRW/4DQpLqkcXcjtNprbH2tz/sJaNtqinfPWl/LA=
|
||||
github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M=
|
||||
github.com/portworx/sched-ops v0.20.4-openstorage-rc3/go.mod h1:DpRDDqXWQrReFJ5SHWWrURuZdzVKjrh2OxbAfwnrAyk=
|
||||
@ -940,8 +980,10 @@ github.com/portworx/talisman v0.0.0-20191007232806-837747f38224/go.mod h1:OjpMH9
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
|
||||
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
|
||||
github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII=
|
||||
github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
|
||||
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
|
||||
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
|
||||
@ -998,7 +1040,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
@ -1011,8 +1053,9 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U=
|
||||
github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
|
||||
github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc=
|
||||
github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ=
|
||||
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@ -1021,8 +1064,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
@ -1031,6 +1074,7 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
|
||||
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
@ -1060,14 +1104,16 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
|
||||
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
@ -1091,6 +1137,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
@ -1104,6 +1151,7 @@ go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyP
|
||||
go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc=
|
||||
go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
@ -1119,10 +1167,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
|
||||
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
|
||||
go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
|
||||
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
|
||||
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
|
||||
go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
|
||||
go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
|
||||
@ -1133,13 +1179,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUE
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
|
||||
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
|
||||
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
|
||||
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
|
||||
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
|
||||
go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
|
||||
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
|
||||
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
|
||||
go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs=
|
||||
go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
|
||||
go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
|
||||
go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
@ -1282,8 +1325,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -1305,8 +1348,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
|
||||
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -1385,7 +1428,6 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -1396,27 +1438,30 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@ -1429,8 +1474,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@ -1549,8 +1594,7 @@ google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
|
||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||
google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@ -1627,8 +1671,8 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
@ -1683,8 +1727,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM=
|
||||
google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@ -1702,7 +1747,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
|
||||
gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
||||
gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
@ -1732,6 +1777,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
@ -1793,6 +1839,7 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
|
||||
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
26 vendor/github.com/armon/go-metrics/.gitignore generated vendored Normal file
@ -0,0 +1,26 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
/metrics.out
|
||||
|
||||
.idea
|
13 vendor/github.com/armon/go-metrics/.travis.yml generated vendored Normal file
@ -0,0 +1,13 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- "1.x"
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
|
||||
install:
|
||||
- go get ./...
|
||||
|
||||
script:
|
||||
- go test ./...
|
20 vendor/github.com/armon/go-metrics/LICENSE generated vendored Normal file
@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Armon Dadgar
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
91 vendor/github.com/armon/go-metrics/README.md generated vendored Normal file
@ -0,0 +1,91 @@
go-metrics
==========

This library provides a `metrics` package which can be used to instrument code,
expose application metrics, and profile runtime performance in a flexible manner.

Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)

Sinks
-----

The `metrics` package makes use of a `MetricSink` interface to support delivery
to any type of backend. Currently the following sinks are provided:

* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
* InmemSink : Provides in-memory aggregation, can be used to export stats
* FanoutSink : Fans out to multiple sinks at once, for example to write to several statsite instances (see the sketch after this list)
* BlackholeSink : Sinks to nowhere

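Several of the sinks above can be combined. A minimal sketch in the style of the examples further down, assuming the constructors used in this README (`NewInmemSink`, `NewStatsiteSink`, `NewGlobal`) and the slice-typed `FanoutSink` from this package:

```go
// Keep metrics in memory and also ship them to a statsite instance over TCP.
inm := metrics.NewInmemSink(10*time.Second, time.Minute)

statsite, err := metrics.NewStatsiteSink("statsite:8125")
if err != nil {
    // Handle the error properly in real code.
    panic(err)
}

// FanoutSink is a slice of sinks; every metric is delivered to each of them.
fanout := metrics.FanoutSink{inm, statsite}
metrics.NewGlobal(metrics.DefaultConfig("service-name"), fanout)
```
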
In addition to the sinks, the `InmemSignal` can be used to catch a signal,
and dump a formatted output of recent metrics. For example, when a process gets
a SIGUSR1, it can dump recent performance metrics to stderr for debugging.

Labels
------

Most metrics have an equivalent method ending in `WithLabels`. These methods
allow pushing metrics with labels and using some features of the underlying
sinks (for example, translation into Prometheus labels).

Since some of these labels may greatly increase the cardinality of metrics, the
library allows filtering labels using a blacklist/whitelist system which is
global to all metrics.

* If `Config.AllowedLabels` is not nil, only the labels listed in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
* If `Config.BlockedLabels` is not nil, any label listed in this value will not be sent to the underlying sinks.

By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil,
meaning that no labels are filtered at all, but this lets a user globally block
some high-cardinality labels at the application level.
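
As a concrete illustration, here is a minimal sketch of blocking one label globally. It assumes `Config.BlockedLabels` is honored by `NewGlobal` as described above; the `request_id` label is only a hypothetical stand-in for any high-cardinality label:

```go
package main

import (
    "time"

    metrics "github.com/armon/go-metrics"
)

func main() {
    // In-memory sink so the sketch has no external dependencies.
    inm := metrics.NewInmemSink(10*time.Second, time.Minute)

    cfg := metrics.DefaultConfig("service-name")
    // Hypothetical policy: never forward the high-cardinality "request_id" label.
    cfg.BlockedLabels = []string{"request_id"}

    if _, err := metrics.NewGlobal(cfg, inm); err != nil {
        panic(err)
    }

    // The counter is recorded, but the blocked label is stripped before it
    // reaches the sink.
    metrics.IncrCounterWithLabels([]string{"requests"}, 1,
        []metrics.Label{{Name: "request_id", Value: "abc123"}})
}
```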

Examples
--------

Here is an example of using the package:

```go
func SlowMethod() {
    // Profiling the runtime of a method
    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
}

// Configure a statsite sink as the global metrics sink
sink, _ := metrics.NewStatsiteSink("statsite:8125")
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)

// Emit a Key/Value pair
metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
```

Here is an example of setting up a signal handler:

```go
// Setup the inmem sink and signal handler
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
sig := metrics.DefaultInmemSignal(inm)
metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)

// Run some code
inm.SetGauge([]string{"foo"}, 42)
inm.EmitKey([]string{"bar"}, 30)

inm.IncrCounter([]string{"baz"}, 42)
inm.IncrCounter([]string{"baz"}, 1)
inm.IncrCounter([]string{"baz"}, 80)

inm.AddSample([]string{"method", "wow"}, 42)
inm.AddSample([]string{"method", "wow"}, 100)
inm.AddSample([]string{"method", "wow"}, 22)

....
```

When a signal comes in, output like the following will be dumped to stderr:

    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
12 vendor/github.com/armon/go-metrics/const_unix.go generated vendored Normal file
@ -0,0 +1,12 @@
|
||||
// +build !windows
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSignal is used with DefaultInmemSignal
|
||||
DefaultSignal = syscall.SIGUSR1
|
||||
)
|
13 vendor/github.com/armon/go-metrics/const_windows.go generated vendored Normal file
@ -0,0 +1,13 @@
|
||||
// +build windows
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSignal is used with DefaultInmemSignal
|
||||
// Windows has no SIGUSR1, use SIGBREAK
|
||||
DefaultSignal = syscall.Signal(21)
|
||||
)
|
339 vendor/github.com/armon/go-metrics/inmem.go generated vendored Normal file
@ -0,0 +1,339 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var spaceReplacer = strings.NewReplacer(" ", "_")
|
||||
|
||||
// InmemSink provides a MetricSink that does in-memory aggregation
|
||||
// without sending metrics over a network. It can be embedded within
|
||||
// an application to provide profiling information.
|
||||
type InmemSink struct {
|
||||
// How long is each aggregation interval
|
||||
interval time.Duration
|
||||
|
||||
// Retain controls how many metric intervals we keep
|
||||
retain time.Duration
|
||||
|
||||
// maxIntervals is the maximum length of intervals.
|
||||
// It is retain / interval.
|
||||
maxIntervals int
|
||||
|
||||
// intervals is a slice of the retained intervals
|
||||
intervals []*IntervalMetrics
|
||||
intervalLock sync.RWMutex
|
||||
|
||||
rateDenom float64
|
||||
}
|
||||
|
||||
// IntervalMetrics stores the aggregated metrics
|
||||
// for a specific interval
|
||||
type IntervalMetrics struct {
|
||||
sync.RWMutex
|
||||
|
||||
// The start time of the interval
|
||||
Interval time.Time
|
||||
|
||||
// Gauges maps the key to the last set value
|
||||
Gauges map[string]GaugeValue
|
||||
|
||||
// Points maps the string to the list of emitted values
|
||||
// from EmitKey
|
||||
Points map[string][]float32
|
||||
|
||||
// Counters maps the string key to a sum of the counter
|
||||
// values
|
||||
Counters map[string]SampledValue
|
||||
|
||||
// Samples maps the key to an AggregateSample,
|
||||
// which has the rolled up view of a sample
|
||||
Samples map[string]SampledValue
|
||||
|
||||
// done is closed when this interval has ended, and a new IntervalMetrics
|
||||
// has been created to receive any future metrics.
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
|
||||
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
|
||||
return &IntervalMetrics{
|
||||
Interval: intv,
|
||||
Gauges: make(map[string]GaugeValue),
|
||||
Points: make(map[string][]float32),
|
||||
Counters: make(map[string]SampledValue),
|
||||
Samples: make(map[string]SampledValue),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// AggregateSample is used to hold aggregate metrics
|
||||
// about a sample
|
||||
type AggregateSample struct {
|
||||
Count int // The count of emitted pairs
|
||||
Rate float64 // The values rate per time unit (usually 1 second)
|
||||
Sum float64 // The sum of values
|
||||
SumSq float64 `json:"-"` // The sum of squared values
|
||||
Min float64 // Minimum value
|
||||
Max float64 // Maximum value
|
||||
LastUpdated time.Time `json:"-"` // When value was last updated
|
||||
}
|
||||
|
||||
// Computes a Stddev of the values
|
||||
func (a *AggregateSample) Stddev() float64 {
|
||||
num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
|
||||
div := float64(a.Count * (a.Count - 1))
|
||||
if div == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Sqrt(num / div)
|
||||
}
|
||||
|
||||
// Computes a mean of the values
|
||||
func (a *AggregateSample) Mean() float64 {
|
||||
if a.Count == 0 {
|
||||
return 0
|
||||
}
|
||||
return a.Sum / float64(a.Count)
|
||||
}
|
||||
|
||||
// Ingest is used to update a sample
|
||||
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
|
||||
a.Count++
|
||||
a.Sum += v
|
||||
a.SumSq += (v * v)
|
||||
if v < a.Min || a.Count == 1 {
|
||||
a.Min = v
|
||||
}
|
||||
if v > a.Max || a.Count == 1 {
|
||||
a.Max = v
|
||||
}
|
||||
a.Rate = float64(a.Sum) / rateDenom
|
||||
a.LastUpdated = time.Now()
|
||||
}
|
||||
|
||||
func (a *AggregateSample) String() string {
|
||||
if a.Count == 0 {
|
||||
return "Count: 0"
|
||||
} else if a.Stddev() == 0 {
|
||||
return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
|
||||
} else {
|
||||
return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
|
||||
a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
|
||||
}
|
||||
}
|
||||
|
||||
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
params := u.Query()
|
||||
|
||||
interval, err := time.ParseDuration(params.Get("interval"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
|
||||
}
|
||||
|
||||
retain, err := time.ParseDuration(params.Get("retain"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
|
||||
}
|
||||
|
||||
return NewInmemSink(interval, retain), nil
|
||||
}
|
||||
|
||||
// NewInmemSink is used to construct a new in-memory sink.
|
||||
// Uses an aggregation interval and maximum retention period.
|
||||
func NewInmemSink(interval, retain time.Duration) *InmemSink {
|
||||
rateTimeUnit := time.Second
|
||||
i := &InmemSink{
|
||||
interval: interval,
|
||||
retain: retain,
|
||||
maxIntervals: int(retain / interval),
|
||||
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
|
||||
}
|
||||
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGauge(key []string, val float32) {
|
||||
i.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
|
||||
}
|
||||
|
||||
func (i *InmemSink) EmitKey(key []string, val float32) {
|
||||
k := i.flattenKey(key)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
vals := intv.Points[k]
|
||||
intv.Points[k] = append(vals, val)
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounter(key []string, val float32) {
|
||||
i.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg, ok := intv.Counters[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Counters[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSample(key []string, val float32) {
|
||||
i.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg, ok := intv.Samples[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Samples[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
// Data is used to retrieve all the aggregated metrics
|
||||
// Intervals may be in use, and a read lock should be acquired
|
||||
func (i *InmemSink) Data() []*IntervalMetrics {
|
||||
// Get the current interval, forces creation
|
||||
i.getInterval()
|
||||
|
||||
i.intervalLock.RLock()
|
||||
defer i.intervalLock.RUnlock()
|
||||
|
||||
n := len(i.intervals)
|
||||
intervals := make([]*IntervalMetrics, n)
|
||||
|
||||
copy(intervals[:n-1], i.intervals[:n-1])
|
||||
current := i.intervals[n-1]
|
||||
|
||||
// make its own copy for current interval
|
||||
intervals[n-1] = &IntervalMetrics{}
|
||||
copyCurrent := intervals[n-1]
|
||||
current.RLock()
|
||||
*copyCurrent = *current
|
||||
// RWMutex is not safe to copy, so create a new instance on the copy
|
||||
copyCurrent.RWMutex = sync.RWMutex{}
|
||||
|
||||
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
|
||||
for k, v := range current.Gauges {
|
||||
copyCurrent.Gauges[k] = v
|
||||
}
|
||||
// saved values will not change, so just copy the reference
|
||||
copyCurrent.Points = make(map[string][]float32, len(current.Points))
|
||||
for k, v := range current.Points {
|
||||
copyCurrent.Points[k] = v
|
||||
}
|
||||
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
|
||||
for k, v := range current.Counters {
|
||||
copyCurrent.Counters[k] = v.deepCopy()
|
||||
}
|
||||
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
|
||||
for k, v := range current.Samples {
|
||||
copyCurrent.Samples[k] = v.deepCopy()
|
||||
}
|
||||
current.RUnlock()
|
||||
|
||||
return intervals
|
||||
}
|
||||
|
||||
// getInterval returns the current interval. A new interval is created if no
|
||||
// previous interval exists, or if the current time is beyond the window for the
|
||||
// current interval.
|
||||
func (i *InmemSink) getInterval() *IntervalMetrics {
|
||||
intv := time.Now().Truncate(i.interval)
|
||||
|
||||
// Attempt to return the existing interval first, because it only requires
|
||||
// a read lock.
|
||||
i.intervalLock.RLock()
|
||||
n := len(i.intervals)
|
||||
if n > 0 && i.intervals[n-1].Interval == intv {
|
||||
defer i.intervalLock.RUnlock()
|
||||
return i.intervals[n-1]
|
||||
}
|
||||
i.intervalLock.RUnlock()
|
||||
|
||||
i.intervalLock.Lock()
|
||||
defer i.intervalLock.Unlock()
|
||||
|
||||
// Re-check for an existing interval now that the lock is re-acquired.
|
||||
n = len(i.intervals)
|
||||
if n > 0 && i.intervals[n-1].Interval == intv {
|
||||
return i.intervals[n-1]
|
||||
}
|
||||
|
||||
current := NewIntervalMetrics(intv)
|
||||
i.intervals = append(i.intervals, current)
|
||||
if n > 0 {
|
||||
close(i.intervals[n-1].done)
|
||||
}
|
||||
|
||||
n++
|
||||
// Prune old intervals if the count exceeds the max.
|
||||
if n >= i.maxIntervals {
|
||||
copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
|
||||
i.intervals = i.intervals[:i.maxIntervals]
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (i *InmemSink) flattenKey(parts []string) string {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
joined := strings.Join(parts, ".")
|
||||
|
||||
spaceReplacer.WriteString(buf, joined)
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
|
||||
key := i.flattenKey(parts)
|
||||
buf := bytes.NewBufferString(key)
|
||||
|
||||
for _, label := range labels {
|
||||
spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
|
||||
}
|
||||
|
||||
return buf.String(), key
|
||||
}
|
162 vendor/github.com/armon/go-metrics/inmem_endpoint.go generated vendored Normal file
@ -0,0 +1,162 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MetricsSummary holds a roll-up of metrics info for a given interval
|
||||
type MetricsSummary struct {
|
||||
Timestamp string
|
||||
Gauges []GaugeValue
|
||||
Points []PointValue
|
||||
Counters []SampledValue
|
||||
Samples []SampledValue
|
||||
}
|
||||
|
||||
type GaugeValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
Value float32
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
type PointValue struct {
|
||||
Name string
|
||||
Points []float32
|
||||
}
|
||||
|
||||
type SampledValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
*AggregateSample
|
||||
Mean float64
|
||||
Stddev float64
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
// deepCopy allocates a new instance of AggregateSample
|
||||
func (source *SampledValue) deepCopy() SampledValue {
|
||||
dest := *source
|
||||
if source.AggregateSample != nil {
|
||||
dest.AggregateSample = &AggregateSample{}
|
||||
*dest.AggregateSample = *source.AggregateSample
|
||||
}
|
||||
return dest
|
||||
}
|
||||
|
||||
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
|
||||
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
data := i.Data()
|
||||
|
||||
var interval *IntervalMetrics
|
||||
n := len(data)
|
||||
switch {
|
||||
case n == 0:
|
||||
return nil, fmt.Errorf("no metric intervals have been initialized yet")
|
||||
case n == 1:
|
||||
// Show the current interval if it's all we have
|
||||
interval = data[0]
|
||||
default:
|
||||
// Show the most recent finished interval if we have one
|
||||
interval = data[n-2]
|
||||
}
|
||||
|
||||
return newMetricSummaryFromInterval(interval), nil
|
||||
}
|
||||
|
||||
func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary {
|
||||
interval.RLock()
|
||||
defer interval.RUnlock()
|
||||
|
||||
summary := MetricsSummary{
|
||||
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
|
||||
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
|
||||
Points: make([]PointValue, 0, len(interval.Points)),
|
||||
}
|
||||
|
||||
// Format and sort the output of each metric type, so it gets displayed in a
|
||||
// deterministic order.
|
||||
for name, points := range interval.Points {
|
||||
summary.Points = append(summary.Points, PointValue{name, points})
|
||||
}
|
||||
sort.Slice(summary.Points, func(i, j int) bool {
|
||||
return summary.Points[i].Name < summary.Points[j].Name
|
||||
})
|
||||
|
||||
for hash, value := range interval.Gauges {
|
||||
value.Hash = hash
|
||||
value.DisplayLabels = make(map[string]string)
|
||||
for _, label := range value.Labels {
|
||||
value.DisplayLabels[label.Name] = label.Value
|
||||
}
|
||||
value.Labels = nil
|
||||
|
||||
summary.Gauges = append(summary.Gauges, value)
|
||||
}
|
||||
sort.Slice(summary.Gauges, func(i, j int) bool {
|
||||
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
|
||||
})
|
||||
|
||||
summary.Counters = formatSamples(interval.Counters)
|
||||
summary.Samples = formatSamples(interval.Samples)
|
||||
|
||||
return summary
|
||||
}
|
||||
|
||||
func formatSamples(source map[string]SampledValue) []SampledValue {
|
||||
output := make([]SampledValue, 0, len(source))
|
||||
for hash, sample := range source {
|
||||
displayLabels := make(map[string]string)
|
||||
for _, label := range sample.Labels {
|
||||
displayLabels[label.Name] = label.Value
|
||||
}
|
||||
|
||||
output = append(output, SampledValue{
|
||||
Name: sample.Name,
|
||||
Hash: hash,
|
||||
AggregateSample: sample.AggregateSample,
|
||||
Mean: sample.AggregateSample.Mean(),
|
||||
Stddev: sample.AggregateSample.Stddev(),
|
||||
DisplayLabels: displayLabels,
|
||||
})
|
||||
}
|
||||
sort.Slice(output, func(i, j int) bool {
|
||||
return output[i].Hash < output[j].Hash
|
||||
})
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
type Encoder interface {
|
||||
Encode(interface{}) error
|
||||
}
|
||||
|
||||
// Stream writes metrics using encoder.Encode each time an interval ends. Runs
|
||||
// until the request context is cancelled, or the encoder returns an error.
|
||||
// The caller is responsible for logging any errors from encoder.
|
||||
func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) {
|
||||
interval := i.getInterval()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-interval.done:
|
||||
summary := newMetricSummaryFromInterval(interval)
|
||||
if err := encoder.Encode(summary); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// update interval to the next one
|
||||
interval = i.getInterval()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
117 vendor/github.com/armon/go-metrics/inmem_signal.go generated vendored Normal file
@ -0,0 +1,117 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// InmemSignal is used to listen for a given signal, and when received,
|
||||
// to dump the current metrics from the InmemSink to an io.Writer
|
||||
type InmemSignal struct {
|
||||
signal syscall.Signal
|
||||
inm *InmemSink
|
||||
w io.Writer
|
||||
sigCh chan os.Signal
|
||||
|
||||
stop bool
|
||||
stopCh chan struct{}
|
||||
stopLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewInmemSignal creates a new InmemSignal which listens for a given signal,
|
||||
// and dumps the current metrics out to a writer
|
||||
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
|
||||
i := &InmemSignal{
|
||||
signal: sig,
|
||||
inm: inmem,
|
||||
w: w,
|
||||
sigCh: make(chan os.Signal, 1),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
signal.Notify(i.sigCh, sig)
|
||||
go i.run()
|
||||
return i
|
||||
}
|
||||
|
||||
// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
|
||||
// and writes output to stderr. Windows uses SIGBREAK
|
||||
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
|
||||
return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
|
||||
}
|
||||
|
||||
// Stop is used to stop the InmemSignal from listening
|
||||
func (i *InmemSignal) Stop() {
|
||||
i.stopLock.Lock()
|
||||
defer i.stopLock.Unlock()
|
||||
|
||||
if i.stop {
|
||||
return
|
||||
}
|
||||
i.stop = true
|
||||
close(i.stopCh)
|
||||
signal.Stop(i.sigCh)
|
||||
}
|
||||
|
||||
// run is a long running routine that handles signals
|
||||
func (i *InmemSignal) run() {
|
||||
for {
|
||||
select {
|
||||
case <-i.sigCh:
|
||||
i.dumpStats()
|
||||
case <-i.stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dumpStats is used to dump the data to output writer
|
||||
func (i *InmemSignal) dumpStats() {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
data := i.inm.Data()
|
||||
// Skip the last period which is still being aggregated
|
||||
for j := 0; j < len(data)-1; j++ {
|
||||
intv := data[j]
|
||||
intv.RLock()
|
||||
for _, val := range intv.Gauges {
|
||||
name := i.flattenLabels(val.Name, val.Labels)
|
||||
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
|
||||
}
|
||||
for name, vals := range intv.Points {
|
||||
for _, val := range vals {
|
||||
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
|
||||
}
|
||||
}
|
||||
for _, agg := range intv.Counters {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
for _, agg := range intv.Samples {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
intv.RUnlock()
|
||||
}
|
||||
|
||||
// Write out the bytes
|
||||
i.w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
|
||||
buf := bytes.NewBufferString(name)
|
||||
replacer := strings.NewReplacer(" ", "_", ":", "_")
|
||||
|
||||
for _, label := range labels {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, label.Value)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
299 vendor/github.com/armon/go-metrics/metrics.go generated vendored Normal file
@ -0,0 +1,299 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
iradix "github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
type Label struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGauge(key []string, val float32) {
|
||||
m.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" {
|
||||
if m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
} else if m.EnableHostname {
|
||||
key = insert(0, m.HostName, key)
|
||||
}
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "gauge", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) EmitKey(key []string, val float32) {
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "kv", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
allowed, _ := m.allowMetric(key, nil)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.EmitKey(key, val)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounter(key []string, val float32) {
|
||||
m.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "counter", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSample(key []string, val float32) {
|
||||
m.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "sample", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSince(key []string, start time.Time) {
|
||||
m.MeasureSinceWithLabels(key, start, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "timer", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
elapsed := now.Sub(start)
|
||||
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
|
||||
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
|
||||
}
|
||||
|
||||
// UpdateFilter overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilter(allow, block []string) {
|
||||
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
m.filterLock.Lock()
|
||||
defer m.filterLock.Unlock()
|
||||
|
||||
m.AllowedPrefixes = allow
|
||||
m.BlockedPrefixes = block
|
||||
|
||||
if allowedLabels == nil {
|
||||
// Having a white list means we take only elements from it
|
||||
m.allowedLabels = nil
|
||||
} else {
|
||||
m.allowedLabels = make(map[string]bool)
|
||||
for _, v := range allowedLabels {
|
||||
m.allowedLabels[v] = true
|
||||
}
|
||||
}
|
||||
m.blockedLabels = make(map[string]bool)
|
||||
for _, v := range blockedLabels {
|
||||
m.blockedLabels[v] = true
|
||||
}
|
||||
m.AllowedLabels = allowedLabels
|
||||
m.BlockedLabels = blockedLabels
|
||||
|
||||
m.filter = iradix.New()
|
||||
for _, prefix := range m.AllowedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
|
||||
}
|
||||
for _, prefix := range m.BlockedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Metrics) Shutdown() {
|
||||
if ss, ok := m.sink.(ShutdownSink); ok {
|
||||
ss.Shutdown()
|
||||
}
|
||||
}
|
||||
|
||||
// labelIsAllowed returns true if a label should be included in a metric;
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) labelIsAllowed(label *Label) bool {
|
||||
labelName := (*label).Name
|
||||
if m.blockedLabels != nil {
|
||||
_, ok := m.blockedLabels[labelName]
|
||||
if ok {
|
||||
// If present, let's remove this label
|
||||
return false
|
||||
}
|
||||
}
|
||||
if m.allowedLabels != nil {
|
||||
_, ok := m.allowedLabels[labelName]
|
||||
return ok
|
||||
}
|
||||
// Allow by default
|
||||
return true
|
||||
}
|
||||
|
||||
// filterLabels returns only the allowed labels;
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) filterLabels(labels []Label) []Label {
|
||||
if labels == nil {
|
||||
return nil
|
||||
}
|
||||
toReturn := []Label{}
|
||||
for _, label := range labels {
|
||||
if m.labelIsAllowed(&label) {
|
||||
toReturn = append(toReturn, label)
|
||||
}
|
||||
}
|
||||
return toReturn
|
||||
}
|
||||
|
||||
// Returns whether the metric should be allowed based on configured prefix filters
|
||||
// Also returns the applicable labels.
|
||||
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
|
||||
m.filterLock.RLock()
|
||||
defer m.filterLock.RUnlock()
|
||||
|
||||
if m.filter == nil || m.filter.Len() == 0 {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
|
||||
if !ok {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
return allowed.(bool), m.filterLabels(labels)
|
||||
}
|
||||
|
||||
// Periodically collects runtime stats to publish
|
||||
func (m *Metrics) collectStats() {
|
||||
for {
|
||||
time.Sleep(m.ProfileInterval)
|
||||
m.EmitRuntimeStats()
|
||||
}
|
||||
}
|
||||
|
||||
// Emits various runtime statistics
|
||||
func (m *Metrics) EmitRuntimeStats() {
|
||||
// Export number of Goroutines
|
||||
numRoutines := runtime.NumGoroutine()
|
||||
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
|
||||
|
||||
// Export memory stats
|
||||
var stats runtime.MemStats
|
||||
runtime.ReadMemStats(&stats)
|
||||
m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
|
||||
m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
|
||||
m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
|
||||
m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
|
||||
m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
|
||||
m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
|
||||
m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
|
||||
|
||||
// Export info about the last few GC runs
|
||||
num := stats.NumGC
|
||||
|
||||
// Handle wrap around
|
||||
if num < m.lastNumGC {
|
||||
m.lastNumGC = 0
|
||||
}
|
||||
|
||||
// Ensure we don't scan more than 256
|
||||
if num-m.lastNumGC >= 256 {
|
||||
m.lastNumGC = num - 255
|
||||
}
|
||||
|
||||
for i := m.lastNumGC; i < num; i++ {
|
||||
pause := stats.PauseNs[i%256]
|
||||
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
|
||||
}
|
||||
m.lastNumGC = num
|
||||
}
|
||||
|
||||
// Creates a new slice with the provided string value as the first element
|
||||
// and the provided slice values as the remaining values.
|
||||
// Ordering of the values in the provided input slice is kept intact in the output slice.
|
||||
func insert(i int, v string, s []string) []string {
|
||||
// Allocate new slice to avoid modifying the input slice
|
||||
newS := make([]string, len(s)+1)
|
||||
|
||||
// Copy s[0, i-1] into newS
|
||||
for j := 0; j < i; j++ {
|
||||
newS[j] = s[j]
|
||||
}
|
||||
|
||||
// Insert provided element at index i
|
||||
newS[i] = v
|
||||
|
||||
// Copy s[i, len(s)-1] into newS starting at newS[i+1]
|
||||
for j := i; j < len(s); j++ {
|
||||
newS[j+1] = s[j]
|
||||
}
|
||||
|
||||
return newS
|
||||
}
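As a quick illustration of how the prefix filter and label filtering above combine, here is a hedged sketch that only uses constructors shown in this diff; the key and label names are made up for the example.

```go
package main

import (
	metrics "github.com/armon/go-metrics"
)

func main() {
	cfg := metrics.DefaultConfig("svc")
	cfg.EnableHostname = false // keep keys as "svc.<key>" for this example
	cfg.FilterDefault = false  // drop anything not explicitly allowed

	// BlackholeSink keeps the sketch self-contained; a real sink behaves the same way.
	m, _ := metrics.New(cfg, &metrics.BlackholeSink{})

	// Longest prefix wins: "svc.api." is allowed, "svc.api.internal" is blocked,
	// and the "password" label is stripped from whatever gets through.
	m.UpdateFilterAndLabels(
		[]string{"svc.api."},         // allow
		[]string{"svc.api.internal"}, // block
		nil,                          // nil allowedLabels = allow all labels...
		[]string{"password"},         // ...except the blocked ones
	)

	// Forwarded to the sink as "svc.api.requests", with only the method label.
	m.IncrCounterWithLabels([]string{"api", "requests"}, 1, []metrics.Label{
		{Name: "method", Value: "GET"},
		{Name: "password", Value: "hunter2"},
	})

	// Dropped by the longest-prefix match on "svc.api.internal".
	m.IncrCounter([]string{"api", "internal", "retries"}, 1)
}
```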
|
132 vendor/github.com/armon/go-metrics/sink.go (generated, vendored, new file)
@ -0,0 +1,132 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// The MetricSink interface is used to transmit metrics information
|
||||
// to an external system
|
||||
type MetricSink interface {
|
||||
// A Gauge should retain the last value it is set to
|
||||
SetGauge(key []string, val float32)
|
||||
SetGaugeWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Should emit a Key/Value pair for each call
|
||||
EmitKey(key []string, val float32)
|
||||
|
||||
// Counters should accumulate values
|
||||
IncrCounter(key []string, val float32)
|
||||
IncrCounterWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Samples are for timing information, where quantiles are used
|
||||
AddSample(key []string, val float32)
|
||||
AddSampleWithLabels(key []string, val float32, labels []Label)
|
||||
}
|
||||
|
||||
type ShutdownSink interface {
|
||||
MetricSink
|
||||
|
||||
// Shutdown the metric sink, flush metrics to storage, and cleanup resources.
|
||||
// Called immediately prior to application exit. Implementations must block
|
||||
// until metrics are flushed to storage.
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// BlackholeSink is used to just blackhole messages
|
||||
type BlackholeSink struct{}
|
||||
|
||||
func (*BlackholeSink) SetGauge(key []string, val float32) {}
|
||||
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) EmitKey(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) AddSample(key []string, val float32) {}
|
||||
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
|
||||
|
||||
// FanoutSink is used to fan out values to multiple sinks
|
||||
type FanoutSink []MetricSink
|
||||
|
||||
func (fh FanoutSink) SetGauge(key []string, val float32) {
|
||||
fh.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) EmitKey(key []string, val float32) {
|
||||
for _, s := range fh {
|
||||
s.EmitKey(key, val)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounter(key []string, val float32) {
|
||||
fh.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSample(key []string, val float32) {
|
||||
fh.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) Shutdown() {
|
||||
for _, s := range fh {
|
||||
if ss, ok := s.(ShutdownSink); ok {
|
||||
ss.Shutdown()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
|
||||
// by each sink type
|
||||
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
|
||||
|
||||
// sinkRegistry supports the generic NewMetricSink function by mapping URL
|
||||
// schemes to metric sink factory functions
|
||||
var sinkRegistry = map[string]sinkURLFactoryFunc{
|
||||
"statsd": NewStatsdSinkFromURL,
|
||||
"statsite": NewStatsiteSinkFromURL,
|
||||
"inmem": NewInmemSinkFromURL,
|
||||
}
|
||||
|
||||
// NewMetricSinkFromURL allows a generic URL input to configure any of the
|
||||
// supported sinks. The scheme of the URL identifies the type of the sink, and
// the query parameters are used to set options.
|
||||
//
|
||||
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
|
||||
// as the "addr" of the sink
|
||||
//
|
||||
// "statsite://" - Initializes a StatsiteSink. The host and port become the
|
||||
// "addr" of the sink
|
||||
//
|
||||
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
|
||||
// "interval" and "duration" query parameters must be specified with valid
|
||||
// durations, see NewInmemSink for details.
|
||||
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
|
||||
if sinkURLFactoryFunc == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
|
||||
}
|
||||
|
||||
return sinkURLFactoryFunc(u)
|
||||
}
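To show the URL registry and FanoutSink together, a small hedged sketch (the addresses are placeholders; both constructors appear in this diff):

```go
package main

import (
	"log"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// "statsd://" is looked up in sinkRegistry and dispatched to NewStatsdSinkFromURL.
	statsd, err := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}

	// Sinks can also be built directly, bypassing the URL registry.
	statsite, err := metrics.NewStatsiteSink("127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}

	// FanoutSink is itself a MetricSink and forwards every call to each member.
	fanout := metrics.FanoutSink{statsd, statsite}
	metrics.NewGlobal(metrics.DefaultConfig("svc"), fanout)
	metrics.SetGauge([]string{"queue", "depth"}, 42)
}
```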
|
158 vendor/github.com/armon/go-metrics/start.go (generated, vendored, new file)
@ -0,0 +1,158 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
iradix "github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
// Config is used to configure metrics settings
|
||||
type Config struct {
|
||||
ServiceName string // Prefixed with keys to separate services
|
||||
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
|
||||
EnableHostname bool // Enable prefixing gauge values with hostname
|
||||
EnableHostnameLabel bool // Enable adding hostname to labels
|
||||
EnableServiceLabel bool // Enable adding service to labels
|
||||
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
|
||||
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
|
||||
TimerGranularity time.Duration // Granularity of timers.
|
||||
ProfileInterval time.Duration // Interval to profile runtime metrics
|
||||
|
||||
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
|
||||
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
|
||||
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
|
||||
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
|
||||
FilterDefault bool // Whether to allow metrics by default
|
||||
}
|
||||
|
||||
// Metrics represents an instance of a metrics sink that can
|
||||
// be used to emit metrics.
|
||||
type Metrics struct {
|
||||
Config
|
||||
lastNumGC uint32
|
||||
sink MetricSink
|
||||
filter *iradix.Tree
|
||||
allowedLabels map[string]bool
|
||||
blockedLabels map[string]bool
|
||||
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
|
||||
}
|
||||
|
||||
// Shared global metrics instance
|
||||
var globalMetrics atomic.Value // *Metrics
|
||||
|
||||
func init() {
|
||||
// Initialize to a blackhole sink to avoid errors
|
||||
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
|
||||
}
|
||||
|
||||
// Default returns the shared global metrics instance.
|
||||
func Default() *Metrics {
|
||||
return globalMetrics.Load().(*Metrics)
|
||||
}
|
||||
|
||||
// DefaultConfig provides a sane default configuration
|
||||
func DefaultConfig(serviceName string) *Config {
|
||||
c := &Config{
|
||||
ServiceName: serviceName, // Use client provided service
|
||||
HostName: "",
|
||||
EnableHostname: true, // Enable hostname prefix
|
||||
EnableRuntimeMetrics: true, // Enable runtime profiling
|
||||
EnableTypePrefix: false, // Disable type prefix
|
||||
TimerGranularity: time.Millisecond, // Timers are in milliseconds
|
||||
ProfileInterval: time.Second, // Poll runtime every second
|
||||
FilterDefault: true, // Don't filter metrics by default
|
||||
}
|
||||
|
||||
// Try to get the hostname
|
||||
name, _ := os.Hostname()
|
||||
c.HostName = name
|
||||
return c
|
||||
}
|
||||
|
||||
// New is used to create a new instance of Metrics
|
||||
func New(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
met := &Metrics{}
|
||||
met.Config = *conf
|
||||
met.sink = sink
|
||||
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
|
||||
|
||||
// Start the runtime collector
|
||||
if conf.EnableRuntimeMetrics {
|
||||
go met.collectStats()
|
||||
}
|
||||
return met, nil
|
||||
}
|
||||
|
||||
// NewGlobal is the same as New, but it assigns the metrics object to be
|
||||
// used globally as well as returning it.
|
||||
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
metrics, err := New(conf, sink)
|
||||
if err == nil {
|
||||
globalMetrics.Store(metrics)
|
||||
}
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
// Proxy all the methods to the globalMetrics instance
|
||||
func SetGauge(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).SetGauge(key, val)
|
||||
}
|
||||
|
||||
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func EmitKey(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).EmitKey(key, val)
|
||||
}
|
||||
|
||||
func IncrCounter(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
|
||||
}
|
||||
|
||||
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func AddSample(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).AddSample(key, val)
|
||||
}
|
||||
|
||||
func AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func MeasureSince(key []string, start time.Time) {
|
||||
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
|
||||
}
|
||||
|
||||
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
|
||||
}
|
||||
|
||||
func UpdateFilter(allow, block []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
|
||||
// and blockedLabels - when not nil - allow filtering of labels in order to
|
||||
// block/allow globally labels (especially useful when having large number of
|
||||
// values for a given label). See README.md for more information about usage.
|
||||
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
|
||||
}
|
||||
|
||||
// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage.
|
||||
// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources.
|
||||
// This is intended for use immediately prior to application exit.
|
||||
func Shutdown() {
|
||||
m := globalMetrics.Load().(*Metrics)
|
||||
// Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a
|
||||
// reason to expect that calls to the library will successfully collect metrics after Shutdown
|
||||
// has been called.
|
||||
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
|
||||
m.Shutdown()
|
||||
}
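The usual start-up path ties the pieces in this file together; a minimal hedged sketch using only constructors shown in this diff:

```go
package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Hostname prefixing and the background runtime-stats collector are both
	// enabled by DefaultConfig; the service name becomes the key prefix.
	cfg := metrics.DefaultConfig("ceph-csi-example")

	// BlackholeSink keeps the sketch dependency-free; swap in a real sink as needed.
	if _, err := metrics.NewGlobal(cfg, &metrics.BlackholeSink{}); err != nil {
		panic(err)
	}

	// The package-level helpers proxy to the instance stored by NewGlobal.
	start := time.Now()
	metrics.IncrCounter([]string{"requests"}, 1)
	metrics.SetGauge([]string{"inflight"}, 3)
	metrics.MeasureSince([]string{"request", "latency"}, start) // reported in TimerGranularity units (ms)
}
```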
|
184 vendor/github.com/armon/go-metrics/statsd.go (generated, vendored, new file)
@ -0,0 +1,184 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// statsdMaxLen is the maximum size of a packet
|
||||
// to send to statsd
|
||||
statsdMaxLen = 1400
|
||||
)
|
||||
|
||||
// StatsdSink provides a MetricSink that can be used
|
||||
// with a statsite or statsd metrics server. It uses
|
||||
// only UDP packets, while StatsiteSink uses TCP.
|
||||
type StatsdSink struct {
|
||||
addr string
|
||||
metricQueue chan string
|
||||
}
|
||||
|
||||
// NewStatsdSinkFromURL creates a StatsdSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsdSink(u.Host)
|
||||
}
|
||||
|
||||
// NewStatsdSink is used to create a new StatsdSink
|
||||
func NewStatsdSink(addr string) (*StatsdSink, error) {
|
||||
s := &StatsdSink{
|
||||
addr: addr,
|
||||
metricQueue: make(chan string, 4096),
|
||||
}
|
||||
go s.flushMetrics()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Shutdown is used to stop flushing to statsd
|
||||
func (s *StatsdSink) Shutdown() {
|
||||
close(s.metricQueue)
|
||||
}
|
||||
|
||||
func (s *StatsdSink) SetGauge(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) EmitKey(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) IncrCounter(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSample(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
|
||||
// Does a non-blocking push to the metrics queue
|
||||
func (s *StatsdSink) pushMetric(m string) {
|
||||
select {
|
||||
case s.metricQueue <- m:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Flushes metrics
|
||||
func (s *StatsdSink) flushMetrics() {
|
||||
var sock net.Conn
|
||||
var err error
|
||||
var wait <-chan time.Time
|
||||
ticker := time.NewTicker(flushInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
CONNECT:
|
||||
// Create a buffer
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
// Attempt to connect
|
||||
sock, err = net.Dial("udp", s.addr)
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric, ok := <-s.metricQueue:
|
||||
// Get a metric from the queue
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
|
||||
// Check if this would overflow the packet size
|
||||
if len(metric)+buf.Len() > statsdMaxLen {
|
||||
_, err := sock.Write(buf.Bytes())
|
||||
buf.Reset()
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error writing to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Append to the buffer
|
||||
buf.WriteString(metric)
|
||||
|
||||
case <-ticker.C:
|
||||
if buf.Len() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := sock.Write(buf.Bytes())
|
||||
buf.Reset()
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WAIT:
|
||||
// Wait for a while
|
||||
wait = time.After(time.Duration(5) * time.Second)
|
||||
for {
|
||||
select {
|
||||
// Dequeue the messages to avoid backlog
|
||||
case _, ok := <-s.metricQueue:
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
case <-wait:
|
||||
goto CONNECT
|
||||
}
|
||||
}
|
||||
QUIT:
|
||||
s.metricQueue = nil
|
||||
}
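A hedged sketch of driving the UDP sink directly (the address is a placeholder); it only uses functions defined above:

```go
package main

import (
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// NewStatsdSink does not dial immediately; the background flusher connects
	// and sends buffered packets of up to statsdMaxLen bytes.
	sink, err := metrics.NewStatsdSink("127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}
	defer sink.Shutdown() // closes the queue and lets the flush goroutine exit

	// flattenKey joins with '.' and maps spaces/colons to '_', so this is
	// queued as "csi_rbd.attach_count:1.000000|c".
	sink.IncrCounter([]string{"csi rbd", "attach_count"}, 1)
	sink.SetGaugeWithLabels([]string{"volumes"}, 12,
		[]metrics.Label{{Name: "pool", Value: "replicapool"}})

	time.Sleep(200 * time.Millisecond) // give the flusher a tick to send
}
```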
|
172 vendor/github.com/armon/go-metrics/statsite.go (generated, vendored, new file)
@ -0,0 +1,172 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// We force flush the statsite metrics after this period of
|
||||
// inactivity. Prevents stats from getting stuck in a buffer
|
||||
// forever.
|
||||
flushInterval = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// NewStatsiteSinkFromURL creates a StatsiteSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsiteSink(u.Host)
|
||||
}
|
||||
|
||||
// StatsiteSink provides a MetricSink that can be used with a
|
||||
// statsite metrics server
|
||||
type StatsiteSink struct {
|
||||
addr string
|
||||
metricQueue chan string
|
||||
}
|
||||
|
||||
// NewStatsiteSink is used to create a new StatsiteSink
|
||||
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
|
||||
s := &StatsiteSink{
|
||||
addr: addr,
|
||||
metricQueue: make(chan string, 4096),
|
||||
}
|
||||
go s.flushMetrics()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Shutdown is used to stop flushing to statsite
|
||||
func (s *StatsiteSink) Shutdown() {
|
||||
close(s.metricQueue)
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) SetGauge(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) EmitKey(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) IncrCounter(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) AddSample(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (s *StatsiteSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
|
||||
// Does a non-blocking push to the metrics queue
|
||||
func (s *StatsiteSink) pushMetric(m string) {
|
||||
select {
|
||||
case s.metricQueue <- m:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Flushes metrics
|
||||
func (s *StatsiteSink) flushMetrics() {
|
||||
var sock net.Conn
|
||||
var err error
|
||||
var wait <-chan time.Time
|
||||
var buffered *bufio.Writer
|
||||
ticker := time.NewTicker(flushInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
CONNECT:
|
||||
// Attempt to connect
|
||||
sock, err = net.Dial("tcp", s.addr)
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Create a buffered writer
|
||||
buffered = bufio.NewWriter(sock)
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric, ok := <-s.metricQueue:
|
||||
// Get a metric from the queue
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
|
||||
// Try to send to statsite
|
||||
_, err := buffered.Write([]byte(metric))
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error writing to statsite! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
case <-ticker.C:
|
||||
if err := buffered.Flush(); err != nil {
|
||||
log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WAIT:
|
||||
// Wait for a while
|
||||
wait = time.After(time.Duration(5) * time.Second)
|
||||
for {
|
||||
select {
|
||||
// Dequeue the messages to avoid backlog
|
||||
case _, ok := <-s.metricQueue:
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
case <-wait:
|
||||
goto CONNECT
|
||||
}
|
||||
}
|
||||
QUIT:
|
||||
s.metricQueue = nil
|
||||
}
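The statsite variant behaves the same at the call sites; the difference is the TCP connection and the bufio.Writer flushed every flushInterval (100ms). A hedged sketch with a placeholder address:

```go
package main

import (
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	sink, err := metrics.NewStatsiteSink("127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}
	defer sink.Shutdown()

	sink.AddSample([]string{"rpc", "latency_ms"}, 4.2) // emitted as "rpc.latency_ms:4.200000|ms"
	time.Sleep(200 * time.Millisecond)                 // allow the 100ms ticker to flush the buffer
}
```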
|
2 vendor/github.com/ceph/ceph-csi/api/deploy/ocp/scc.yaml (generated, vendored)
@ -10,7 +10,7 @@ allowHostNetwork: true
|
||||
# This needs to be set to true as we use HostPath
|
||||
allowHostDirVolumePlugin: true
|
||||
priority:
|
||||
# SYS_ADMIN is needed for rbd to execture rbd map command
|
||||
# SYS_ADMIN is needed for rbd to execute rbd map command
|
||||
allowedCapabilities: ["SYS_ADMIN"]
|
||||
# Needed as we run liveness container on daemonset pods
|
||||
allowHostPorts: true
|
||||
|
14 vendor/github.com/fatih/color/README.md (generated, vendored)
@ -7,7 +7,6 @@ suits you.
|
||||
|
||||
![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg)
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
@ -124,17 +123,17 @@ fmt.Println("All text will now be bold magenta.")
|
||||
```
|
||||
|
||||
### Disable/Enable color
|
||||
|
||||
|
||||
There might be a case where you want to explicitly disable/enable color output. The
|
||||
`go-isatty` package will automatically disable color output for non-tty output streams
|
||||
(for example if the output were piped directly to `less`).
|
||||
|
||||
The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
|
||||
variable is set (regardless of its value).
|
||||
variable is set to a non-empty string.
|
||||
|
||||
`Color` has support to disable/enable colors programatically both globally and
|
||||
`Color` has support to disable/enable colors programmatically both globally and
|
||||
for single color definitions. For example suppose you have a CLI app and a
|
||||
`--no-color` bool flag. You can easily disable the color output with:
|
||||
`-no-color` bool flag. You can easily disable the color output with:
|
||||
|
||||
```go
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
@ -167,11 +166,10 @@ To output color in GitHub Actions (or other CI systems that support ANSI colors)
|
||||
* Save/Return previous values
|
||||
* Evaluate fmt.Formatter interface
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
|
||||
|
||||
## License
|
||||
|
||||
|
46 vendor/github.com/fatih/color/color.go (generated, vendored)
@ -19,10 +19,10 @@ var (
|
||||
// set (regardless of its value). This is a global option and affects all
|
||||
// colors. For more control over each color block use the methods
|
||||
// DisableColor() individually.
|
||||
NoColor = noColorExists() || os.Getenv("TERM") == "dumb" ||
|
||||
NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" ||
|
||||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
|
||||
|
||||
// Output defines the standard output of the print functions. By default
|
||||
// Output defines the standard output of the print functions. By default,
|
||||
// os.Stdout is used.
|
||||
Output = colorable.NewColorableStdout()
|
||||
|
||||
@ -35,10 +35,9 @@ var (
|
||||
colorsCacheMu sync.Mutex // protects colorsCache
|
||||
)
|
||||
|
||||
// noColorExists returns true if the environment variable NO_COLOR exists.
|
||||
func noColorExists() bool {
|
||||
_, exists := os.LookupEnv("NO_COLOR")
|
||||
return exists
|
||||
// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string.
|
||||
func noColorIsSet() bool {
|
||||
return os.Getenv("NO_COLOR") != ""
|
||||
}
|
||||
|
||||
// Color defines a custom color object which is defined by SGR parameters.
|
||||
@ -120,7 +119,7 @@ func New(value ...Attribute) *Color {
|
||||
params: make([]Attribute, 0),
|
||||
}
|
||||
|
||||
if noColorExists() {
|
||||
if noColorIsSet() {
|
||||
c.noColor = boolPtr(true)
|
||||
}
|
||||
|
||||
@ -152,7 +151,7 @@ func (c *Color) Set() *Color {
|
||||
return c
|
||||
}
|
||||
|
||||
fmt.Fprintf(Output, c.format())
|
||||
fmt.Fprint(Output, c.format())
|
||||
return c
|
||||
}
|
||||
|
||||
@ -164,16 +163,21 @@ func (c *Color) unset() {
|
||||
Unset()
|
||||
}
|
||||
|
||||
func (c *Color) setWriter(w io.Writer) *Color {
|
||||
// SetWriter is used to set the SGR sequence with the given io.Writer. This is
|
||||
// a low-level function, and users should use the higher-level functions, such
|
||||
// as color.Fprint, color.Print, etc.
|
||||
func (c *Color) SetWriter(w io.Writer) *Color {
|
||||
if c.isNoColorSet() {
|
||||
return c
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, c.format())
|
||||
fmt.Fprint(w, c.format())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Color) unsetWriter(w io.Writer) {
|
||||
// UnsetWriter resets all escape attributes and clears the output with the given
|
||||
// io.Writer. Usually should be called after SetWriter().
|
||||
func (c *Color) UnsetWriter(w io.Writer) {
|
||||
if c.isNoColorSet() {
|
||||
return
|
||||
}
|
||||
@ -192,20 +196,14 @@ func (c *Color) Add(value ...Attribute) *Color {
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Color) prepend(value Attribute) {
|
||||
c.params = append(c.params, 0)
|
||||
copy(c.params[1:], c.params[0:])
|
||||
c.params[0] = value
|
||||
}
|
||||
|
||||
// Fprint formats using the default formats for its operands and writes to w.
|
||||
// Spaces are added between operands when neither is a string.
|
||||
// It returns the number of bytes written and any write error encountered.
|
||||
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||
// type *os.File.
|
||||
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
c.setWriter(w)
|
||||
defer c.unsetWriter(w)
|
||||
c.SetWriter(w)
|
||||
defer c.UnsetWriter(w)
|
||||
|
||||
return fmt.Fprint(w, a...)
|
||||
}
|
||||
@ -227,8 +225,8 @@ func (c *Color) Print(a ...interface{}) (n int, err error) {
|
||||
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||
// type *os.File.
|
||||
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
c.setWriter(w)
|
||||
defer c.unsetWriter(w)
|
||||
c.SetWriter(w)
|
||||
defer c.UnsetWriter(w)
|
||||
|
||||
return fmt.Fprintf(w, format, a...)
|
||||
}
|
||||
@ -248,8 +246,8 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||
// type *os.File.
|
||||
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
c.setWriter(w)
|
||||
defer c.unsetWriter(w)
|
||||
c.SetWriter(w)
|
||||
defer c.UnsetWriter(w)
|
||||
|
||||
return fmt.Fprintln(w, a...)
|
||||
}
|
||||
@ -396,7 +394,7 @@ func (c *Color) DisableColor() {
|
||||
}
|
||||
|
||||
// EnableColor enables the color output. Use it in conjunction with
|
||||
// DisableColor(). Otherwise this method has no side effects.
|
||||
// DisableColor(). Otherwise, this method has no side effects.
|
||||
func (c *Color) EnableColor() {
|
||||
c.noColor = boolPtr(false)
|
||||
}
|
||||
|
137 vendor/github.com/fatih/color/doc.go (generated, vendored)
@ -5,106 +5,105 @@ that suits you.
|
||||
|
||||
Use simple and default helper functions with predefined foreground colors:
|
||||
|
||||
color.Cyan("Prints text in cyan.")
|
||||
color.Cyan("Prints text in cyan.")
|
||||
|
||||
// a newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
// a newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
|
||||
// More default foreground colors..
|
||||
color.Red("We have red")
|
||||
color.Yellow("Yellow color too!")
|
||||
color.Magenta("And many others ..")
|
||||
// More default foreground colors..
|
||||
color.Red("We have red")
|
||||
color.Yellow("Yellow color too!")
|
||||
color.Magenta("And many others ..")
|
||||
|
||||
// Hi-intensity colors
|
||||
color.HiGreen("Bright green color.")
|
||||
color.HiBlack("Bright black means gray..")
|
||||
color.HiWhite("Shiny white color!")
|
||||
// Hi-intensity colors
|
||||
color.HiGreen("Bright green color.")
|
||||
color.HiBlack("Bright black means gray..")
|
||||
color.HiWhite("Shiny white color!")
|
||||
|
||||
However there are times where custom color mixes are required. Below are some
|
||||
However, there are times when custom color mixes are required. Below are some
|
||||
examples to create custom color objects and use the print functions of each
|
||||
separate color object.
|
||||
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
|
||||
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with White background.")
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with White background.")
|
||||
|
||||
// Use your own io.Writer output
|
||||
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
|
||||
// Use your own io.Writer output
|
||||
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
|
||||
|
||||
blue := color.New(color.FgBlue)
|
||||
blue.Fprint(myWriter, "This will print text in blue.")
|
||||
blue := color.New(color.FgBlue)
|
||||
blue.Fprint(myWriter, "This will print text in blue.")
|
||||
|
||||
You can create PrintXxx functions to simplify even more:
|
||||
|
||||
// Create a custom print function for convenient
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("warning")
|
||||
red("error: %s", err)
|
||||
// Create a custom print function for convenient
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("warning")
|
||||
red("error: %s", err)
|
||||
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("don't forget this...")
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("don't forget this...")
|
||||
|
||||
You can also FprintXxx functions to pass your own io.Writer:
|
||||
|
||||
blue := color.New(FgBlue).FprintfFunc()
|
||||
blue(myWriter, "important notice: %s", stars)
|
||||
|
||||
// Mix up with multiple attributes
|
||||
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
|
||||
success(myWriter, "don't forget this...")
|
||||
blue := color.New(FgBlue).FprintfFunc()
|
||||
blue(myWriter, "important notice: %s", stars)
|
||||
|
||||
// Mix up with multiple attributes
|
||||
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
|
||||
success(myWriter, "don't forget this...")
|
||||
|
||||
Or create SprintXxx functions to mix strings with other non-colorized strings:
|
||||
|
||||
yellow := New(FgYellow).SprintFunc()
|
||||
red := New(FgRed).SprintFunc()
|
||||
yellow := New(FgYellow).SprintFunc()
|
||||
red := New(FgRed).SprintFunc()
|
||||
|
||||
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Printf("this %s rocks!\n", info("package"))
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Printf("this %s rocks!\n", info("package"))
|
||||
|
||||
Windows support is enabled by default. All Print functions work as intended.
|
||||
However only for color.SprintXXX functions, user should use fmt.FprintXXX and
|
||||
However, only for color.SprintXXX functions, user should use fmt.FprintXXX and
|
||||
set the output to color.Output:
|
||||
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
|
||||
|
||||
Using with existing code is possible. Just use the Set() method to set the
|
||||
standard output to the given parameters. That way a rewrite of an existing
|
||||
code is not required.
|
||||
|
||||
// Use handy standard colors.
|
||||
color.Set(color.FgYellow)
|
||||
// Use handy standard colors.
|
||||
color.Set(color.FgYellow)
|
||||
|
||||
fmt.Println("Existing text will be now in Yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
fmt.Println("Existing text will be now in Yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
|
||||
color.Unset() // don't forget to unset
|
||||
color.Unset() // don't forget to unset
|
||||
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // use it in your function
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // use it in your function
|
||||
|
||||
fmt.Println("All text will be now bold magenta.")
|
||||
fmt.Println("All text will be now bold magenta.")
|
||||
|
||||
There might be a case where you want to disable color output (for example to
|
||||
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||
@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example
|
||||
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||
the color output with:
|
||||
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
|
||||
You can also disable the color by setting the NO_COLOR environment variable to any value.
|
||||
|
||||
It also has support for single color definitions (local). You can
|
||||
disable/enable color output on the fly:
|
||||
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
*/
|
||||
package color
|
||||
|
4 vendor/github.com/hashicorp/go-hclog/LICENSE (generated, vendored)
@ -1,6 +1,4 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 HashiCorp
|
||||
Copyright (c) 2017 HashiCorp, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
2 vendor/github.com/hashicorp/go-hclog/README.md (generated, vendored)
@ -17,7 +17,7 @@ JSON output mode for production.
|
||||
|
||||
## Stability Note
|
||||
|
||||
This library has reached 1.0 stability. It's API can be considered solidified
|
||||
This library has reached 1.0 stability. Its API can be considered solidified
|
||||
and promised through future versions.
|
||||
|
||||
## Installation and Docs
|
||||
|
39 vendor/github.com/hashicorp/go-hclog/colorize_unix.go (generated, vendored)
@ -1,3 +1,7 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package hclog
|
||||
@ -6,22 +10,35 @@ import (
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// hasFD is used to check if the writer has an Fd value to check
|
||||
// if it's a terminal.
|
||||
type hasFD interface {
|
||||
Fd() uintptr
|
||||
}
|
||||
|
||||
// setColorization will mutate the values of this logger
|
||||
// to approperately configure colorization options. It provides
|
||||
// to appropriately configure colorization options. It provides
|
||||
// a wrapper to the output stream on Windows systems.
|
||||
func (l *intLogger) setColorization(opts *LoggerOptions) {
|
||||
switch opts.Color {
|
||||
case ColorOff:
|
||||
fallthrough
|
||||
case ForceColor:
|
||||
if opts.Color != AutoColor {
|
||||
return
|
||||
case AutoColor:
|
||||
fi := l.checkWriterIsFile()
|
||||
isUnixTerm := isatty.IsTerminal(fi.Fd())
|
||||
isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
|
||||
isTerm := isUnixTerm || isCygwinTerm
|
||||
if !isTerm {
|
||||
}
|
||||
|
||||
if sc, ok := l.writer.w.(SupportsColor); ok {
|
||||
if !sc.SupportsColor() {
|
||||
l.headerColor = ColorOff
|
||||
l.writer.color = ColorOff
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fi, ok := l.writer.w.(hasFD)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if !isatty.IsTerminal(fi.Fd()) {
|
||||
l.headerColor = ColorOff
|
||||
l.writer.color = ColorOff
|
||||
}
|
||||
}
|
||||
|
42 vendor/github.com/hashicorp/go-hclog/colorize_windows.go (generated, vendored)
@ -1,3 +1,7 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package hclog
|
||||
@ -6,28 +10,32 @@ import (
|
||||
"os"
|
||||
|
||||
colorable "github.com/mattn/go-colorable"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// setColorization will mutate the values of this logger
|
||||
// to approperately configure colorization options. It provides
|
||||
// to appropriately configure colorization options. It provides
|
||||
// a wrapper to the output stream on Windows systems.
|
||||
func (l *intLogger) setColorization(opts *LoggerOptions) {
|
||||
switch opts.Color {
|
||||
case ColorOff:
|
||||
if opts.Color == ColorOff {
|
||||
return
|
||||
case ForceColor:
|
||||
fi := l.checkWriterIsFile()
|
||||
l.writer.w = colorable.NewColorable(fi)
|
||||
case AutoColor:
|
||||
fi := l.checkWriterIsFile()
|
||||
isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
|
||||
isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
|
||||
isTerm := isUnixTerm || isCygwinTerm
|
||||
if !isTerm {
|
||||
l.writer.color = ColorOff
|
||||
return
|
||||
}
|
||||
l.writer.w = colorable.NewColorable(fi)
|
||||
}
|
||||
|
||||
fi, ok := l.writer.w.(*os.File)
|
||||
if !ok {
|
||||
l.writer.color = ColorOff
|
||||
l.headerColor = ColorOff
|
||||
return
|
||||
}
|
||||
|
||||
cfi := colorable.NewColorable(fi)
|
||||
|
||||
// NewColorable detects if color is possible and if it's not, then it
|
||||
// returns the original value. So we can test if we got the original
|
||||
// value back to know if color is possible.
|
||||
if cfi == fi {
|
||||
l.writer.color = ColorOff
|
||||
l.headerColor = ColorOff
|
||||
} else {
|
||||
l.writer.w = cfi
|
||||
}
|
||||
}
|
||||
|
3 vendor/github.com/hashicorp/go-hclog/context.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
|
3 vendor/github.com/hashicorp/go-hclog/exclude.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
|
7 vendor/github.com/hashicorp/go-hclog/global.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
@ -20,13 +23,13 @@ var (
|
||||
)
|
||||
|
||||
// Default returns a globally held logger. This can be a good starting
|
||||
// place, and then you can use .With() and .Name() to create sub-loggers
|
||||
// place, and then you can use .With() and .Named() to create sub-loggers
|
||||
// to be used in more specific contexts.
|
||||
// The value of the Default logger can be set via SetDefault() or by
|
||||
// changing the options in DefaultOptions.
|
||||
//
|
||||
// This method is goroutine safe, returning a global from memory, but
|
||||
// cause should be used if SetDefault() is called it random times
|
||||
// care should be used if SetDefault() is called at random times
|
||||
// in the program as that may result in race conditions and an unexpected
|
||||
// Logger being returned.
|
||||
func Default() Logger {
|
||||
|
3 vendor/github.com/hashicorp/go-hclog/interceptlogger.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
|
225 vendor/github.com/hashicorp/go-hclog/intlogger.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
@ -8,7 +11,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
@ -17,6 +19,8 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
@ -48,6 +52,12 @@ var (
|
||||
Warn: color.New(color.FgHiYellow),
|
||||
Error: color.New(color.FgHiRed),
|
||||
}
|
||||
|
||||
faintBoldColor = color.New(color.Faint, color.Bold)
|
||||
faintColor = color.New(color.Faint)
|
||||
faintMultiLinePrefix = faintColor.Sprint(" | ")
|
||||
faintFieldSeparator = faintColor.Sprint("=")
|
||||
faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
|
||||
)
|
||||
|
||||
// Make sure that intLogger is a Logger
|
||||
@ -69,12 +79,17 @@ type intLogger struct {
|
||||
writer *writer
|
||||
level *int32
|
||||
|
||||
headerColor ColorOption
|
||||
fieldColor ColorOption
|
||||
|
||||
implied []interface{}
|
||||
|
||||
exclude func(level Level, msg string, args ...interface{}) bool
|
||||
|
||||
// create subloggers with their own level setting
|
||||
independentLevels bool
|
||||
|
||||
subloggerHook func(sub Logger) Logger
|
||||
}
|
||||
|
||||
// New returns a configured logger.
|
||||
@ -113,6 +128,21 @@ func newLogger(opts *LoggerOptions) *intLogger {
|
||||
mutex = new(sync.Mutex)
|
||||
}
|
||||
|
||||
var (
|
||||
primaryColor ColorOption = ColorOff
|
||||
headerColor ColorOption = ColorOff
|
||||
fieldColor ColorOption = ColorOff
|
||||
)
|
||||
switch {
|
||||
case opts.ColorHeaderOnly:
|
||||
headerColor = opts.Color
|
||||
case opts.ColorHeaderAndFields:
|
||||
fieldColor = opts.Color
|
||||
headerColor = opts.Color
|
||||
default:
|
||||
primaryColor = opts.Color
|
||||
}
|
||||
|
||||
l := &intLogger{
|
||||
json: opts.JSONFormat,
|
||||
name: opts.Name,
|
||||
@ -120,10 +150,13 @@ func newLogger(opts *LoggerOptions) *intLogger {
|
||||
timeFn: time.Now,
|
||||
disableTime: opts.DisableTime,
|
||||
mutex: mutex,
|
||||
writer: newWriter(output, opts.Color),
|
||||
writer: newWriter(output, primaryColor),
|
||||
level: new(int32),
|
||||
exclude: opts.Exclude,
|
||||
independentLevels: opts.IndependentLevels,
|
||||
headerColor: headerColor,
|
||||
fieldColor: fieldColor,
|
||||
subloggerHook: opts.SubloggerHook,
|
||||
}
|
||||
if opts.IncludeLocation {
|
||||
l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
|
||||
@ -139,6 +172,10 @@ func newLogger(opts *LoggerOptions) *intLogger {
|
||||
l.timeFormat = opts.TimeFormat
|
||||
}
|
||||
|
||||
if l.subloggerHook == nil {
|
||||
l.subloggerHook = identityHook
|
||||
}
|
||||
|
||||
l.setColorization(opts)
|
||||
|
||||
atomic.StoreInt32(l.level, int32(level))
|
||||
@ -146,8 +183,12 @@ func newLogger(opts *LoggerOptions) *intLogger {
|
||||
return l
|
||||
}
|
||||
|
||||
func identityHook(logger Logger) Logger {
|
||||
return logger
|
||||
}
|
||||
|
||||
// offsetIntLogger is the stack frame offset in the call stack for the caller to
|
||||
// one of the Warn,Info,Log,etc methods.
|
||||
// one of the Warn, Info, Log, etc methods.
|
||||
const offsetIntLogger = 3
|
||||
|
||||
// Log a message and a set of key/value pairs if the given level is at
|
||||
@ -222,7 +263,17 @@ func needsQuoting(str string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Non-JSON logging format function
|
||||
// logPlain is the non-JSON logging format function which writes directly
|
||||
// to the underlying writer the logger was initialized with.
|
||||
//
|
||||
// If the logger was initialized with a color function, it also handles
|
||||
// applying the color to the log message.
|
||||
//
|
||||
// Color Options
|
||||
// 1. No color.
|
||||
// 2. Color the whole log line, based on the level.
|
||||
// 3. Color only the header (level) part of the log line.
|
||||
// 4. Color both the header and fields of the log line.
|
||||
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
|
||||
|
||||
if !l.disableTime {
|
||||
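The four color modes listed above correspond to the Color, ColorHeaderOnly, and ColorHeaderAndFields options that appear later in this diff. A sketch of selecting mode 4, assuming output goes to a terminal:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Mode 4: color both the header and the key=value fields. AutoColor
	// leaves detection to hclog; on Windows colored output needs the
	// writer to be a concrete *os.File.
	log := hclog.New(&hclog.LoggerOptions{
		Name:                 "colored", // illustrative
		Output:               os.Stdout,
		Color:                hclog.AutoColor,
		ColorHeaderAndFields: true,
	})
	log.Warn("disk nearly full", "used_pct", 93)
}
```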
@ -232,7 +283,12 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
|
||||
|
||||
s, ok := _levelToBracket[level]
|
||||
if ok {
|
||||
l.writer.WriteString(s)
|
||||
if l.headerColor != ColorOff {
|
||||
color := _levelToColor[level]
|
||||
color.Fprint(l.writer, s)
|
||||
} else {
|
||||
l.writer.WriteString(s)
|
||||
}
|
||||
} else {
|
||||
l.writer.WriteString("[?????]")
|
||||
}
|
||||
@ -251,16 +307,19 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
|
||||
|
||||
if name != "" {
|
||||
l.writer.WriteString(name)
|
||||
l.writer.WriteString(": ")
|
||||
if msg != "" {
|
||||
l.writer.WriteString(": ")
|
||||
l.writer.WriteString(msg)
|
||||
}
|
||||
} else if msg != "" {
|
||||
l.writer.WriteString(msg)
|
||||
}
|
||||
|
||||
l.writer.WriteString(msg)
|
||||
|
||||
args = append(l.implied, args...)
|
||||
|
||||
var stacktrace CapturedStacktrace
|
||||
|
||||
if args != nil && len(args) > 0 {
|
||||
if len(args) > 0 {
|
||||
if len(args)%2 != 0 {
|
||||
cs, ok := args[len(args)-1].(CapturedStacktrace)
|
||||
if ok {
|
||||
@ -274,13 +333,16 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
|
||||
|
||||
l.writer.WriteByte(':')
|
||||
|
||||
// Handle the field arguments, which come in pairs (key=val).
|
||||
FOR:
|
||||
for i := 0; i < len(args); i = i + 2 {
|
||||
var (
|
||||
key string
|
||||
val string
|
||||
raw bool
|
||||
)
|
||||
|
||||
// Convert the field value to a string.
|
||||
switch st := args[i+1].(type) {
|
||||
case string:
|
||||
val = st
|
||||
@ -332,8 +394,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
|
||||
}
|
||||
}
|
||||
|
||||
var key string
|
||||
|
||||
// Convert the field key to a string.
|
||||
switch st := args[i].(type) {
|
||||
case string:
|
||||
key = st
|
||||
@ -341,21 +402,49 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string,
|
||||
key = fmt.Sprintf("%s", st)
|
||||
}
|
||||
|
||||
// Optionally apply the ANSI "faint" and "bold"
|
||||
// SGR values to the key.
|
||||
if l.fieldColor != ColorOff {
|
||||
key = faintBoldColor.Sprint(key)
|
||||
}
|
||||
|
||||
// Values may contain multiple lines, and that format
|
||||
// is preserved, with each line prefixed with a " | "
|
||||
// to show it's part of a collection of lines.
|
||||
//
|
||||
// Values may also need quoting, if not all the runes
|
||||
// in the value string are "normal", like if they
|
||||
// contain ANSI escape sequences.
|
||||
if strings.Contains(val, "\n") {
|
||||
l.writer.WriteString("\n ")
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteString("=\n")
|
||||
writeIndent(l.writer, val, " | ")
|
||||
if l.fieldColor != ColorOff {
|
||||
l.writer.WriteString(faintFieldSeparatorWithNewLine)
|
||||
writeIndent(l.writer, val, faintMultiLinePrefix)
|
||||
} else {
|
||||
l.writer.WriteString("=\n")
|
||||
writeIndent(l.writer, val, " | ")
|
||||
}
|
||||
l.writer.WriteString(" ")
|
||||
} else if !raw && needsQuoting(val) {
|
||||
l.writer.WriteByte(' ')
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteByte('=')
|
||||
l.writer.WriteString(strconv.Quote(val))
|
||||
if l.fieldColor != ColorOff {
|
||||
l.writer.WriteString(faintFieldSeparator)
|
||||
} else {
|
||||
l.writer.WriteByte('=')
|
||||
}
|
||||
l.writer.WriteByte('"')
|
||||
writeEscapedForOutput(l.writer, val, true)
|
||||
l.writer.WriteByte('"')
|
||||
} else {
|
||||
l.writer.WriteByte(' ')
|
||||
l.writer.WriteString(key)
|
||||
l.writer.WriteByte('=')
|
||||
if l.fieldColor != ColorOff {
|
||||
l.writer.WriteString(faintFieldSeparator)
|
||||
} else {
|
||||
l.writer.WriteByte('=')
|
||||
}
|
||||
l.writer.WriteString(val)
|
||||
}
|
||||
}
|
||||
@ -375,19 +464,98 @@ func writeIndent(w *writer, str string, indent string) {
|
||||
if nl == -1 {
|
||||
if str != "" {
|
||||
w.WriteString(indent)
|
||||
w.WriteString(str)
|
||||
writeEscapedForOutput(w, str, false)
|
||||
w.WriteString("\n")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteString(indent)
|
||||
w.WriteString(str[:nl])
|
||||
writeEscapedForOutput(w, str[:nl], false)
|
||||
w.WriteString("\n")
|
||||
str = str[nl+1:]
|
||||
}
|
||||
}
|
||||
|
||||
func needsEscaping(str string) bool {
|
||||
for _, b := range str {
|
||||
if !unicode.IsPrint(b) || b == '"' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
lowerhex = "0123456789abcdef"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) {
|
||||
if !needsEscaping(str) {
|
||||
w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
bb := bufPool.Get().(*bytes.Buffer)
|
||||
bb.Reset()
|
||||
|
||||
defer bufPool.Put(bb)
|
||||
|
||||
for _, r := range str {
|
||||
if escapeQuotes && r == '"' {
|
||||
bb.WriteString(`\"`)
|
||||
} else if unicode.IsPrint(r) {
|
||||
bb.WriteRune(r)
|
||||
} else {
|
||||
switch r {
|
||||
case '\a':
|
||||
bb.WriteString(`\a`)
|
||||
case '\b':
|
||||
bb.WriteString(`\b`)
|
||||
case '\f':
|
||||
bb.WriteString(`\f`)
|
||||
case '\n':
|
||||
bb.WriteString(`\n`)
|
||||
case '\r':
|
||||
bb.WriteString(`\r`)
|
||||
case '\t':
|
||||
bb.WriteString(`\t`)
|
||||
case '\v':
|
||||
bb.WriteString(`\v`)
|
||||
default:
|
||||
switch {
|
||||
case r < ' ':
|
||||
bb.WriteString(`\x`)
|
||||
bb.WriteByte(lowerhex[byte(r)>>4])
|
||||
bb.WriteByte(lowerhex[byte(r)&0xF])
|
||||
case !utf8.ValidRune(r):
|
||||
r = 0xFFFD
|
||||
fallthrough
|
||||
case r < 0x10000:
|
||||
bb.WriteString(`\u`)
|
||||
for s := 12; s >= 0; s -= 4 {
|
||||
bb.WriteByte(lowerhex[r>>uint(s)&0xF])
|
||||
}
|
||||
default:
|
||||
bb.WriteString(`\U`)
|
||||
for s := 28; s >= 0; s -= 4 {
|
||||
bb.WriteByte(lowerhex[r>>uint(s)&0xF])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
w.Write(bb.Bytes())
|
||||
}
|
||||
|
||||
func (l *intLogger) renderSlice(v reflect.Value) string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
@ -620,7 +788,7 @@ func (l *intLogger) With(args ...interface{}) Logger {
|
||||
sl.implied = append(sl.implied, MissingKey, extra)
|
||||
}
|
||||
|
||||
return sl
|
||||
return l.subloggerHook(sl)
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with a name descending from the current name.
|
||||
@ -634,7 +802,7 @@ func (l *intLogger) Named(name string) Logger {
|
||||
sl.name = name
|
||||
}
|
||||
|
||||
return sl
|
||||
return l.subloggerHook(sl)
|
||||
}
|
||||
|
||||
// Create a new sub-Logger with an explicit name. This ignores the current
|
||||
@ -645,7 +813,7 @@ func (l *intLogger) ResetNamed(name string) Logger {
|
||||
|
||||
sl.name = name
|
||||
|
||||
return sl
|
||||
return l.subloggerHook(sl)
|
||||
}
|
||||
|
||||
func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
|
||||
@ -689,6 +857,11 @@ func (l *intLogger) SetLevel(level Level) {
|
||||
atomic.StoreInt32(l.level, int32(level))
|
||||
}
|
||||
|
||||
// Returns the current level
|
||||
func (l *intLogger) GetLevel() Level {
|
||||
return Level(atomic.LoadInt32(l.level))
|
||||
}
|
||||
|
||||
// Create a *log.Logger that will send its data through this Logger. This
|
||||
// allows packages that expect to be using the standard library log to actually
|
||||
// use this logger.
|
||||
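A short sketch of the stdlib adapter described above; InferLevels is an existing StandardLoggerOptions field and the message text is made up:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "adapter-demo"})

	// Hand a stdlib *log.Logger to code that knows nothing about hclog;
	// its output is routed through logger, and InferLevels maps leading
	// "[WARN]"-style prefixes onto hclog levels.
	std := logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})
	std.Println("[WARN] legacy component is deprecated")
}
```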
@ -716,16 +889,6 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
|
||||
}
|
||||
}
|
||||
|
||||
// checks if the underlying io.Writer is a file, and
|
||||
// panics if not. For use by colorization.
|
||||
func (l *intLogger) checkWriterIsFile() *os.File {
|
||||
fi, ok := l.writer.w.(*os.File)
|
||||
if !ok {
|
||||
panic("Cannot enable coloring of non-file Writers")
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
// Accept implements the SinkAdapter interface
|
||||
func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
|
||||
i.log(name, level, msg, args...)
|
||||
|
45 vendor/github.com/hashicorp/go-hclog/logger.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
@ -9,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
//DefaultOutput is used as the default log output.
|
||||
// DefaultOutput is used as the default log output.
|
||||
DefaultOutput io.Writer = os.Stderr
|
||||
|
||||
// DefaultLevel is used as the default log level.
|
||||
@ -28,7 +31,7 @@ const (
|
||||
// of actions in code, such as function enters/exits, etc.
|
||||
Trace Level = 1
|
||||
|
||||
// Debug information for programmer lowlevel analysis.
|
||||
// Debug information for programmer low-level analysis.
|
||||
Debug Level = 2
|
||||
|
||||
// Info information about steady state operations.
|
||||
@ -44,13 +47,13 @@ const (
|
||||
Off Level = 6
|
||||
)
|
||||
|
||||
// Format is a simple convience type for when formatting is required. When
|
||||
// Format is a simple convenience type for when formatting is required. When
|
||||
// processing a value of this type, the logger automatically treats the first
|
||||
// argument as a Printf formatting string and passes the rest as the values
|
||||
// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
|
||||
type Format []interface{}
|
||||
|
||||
// Fmt returns a Format type. This is a convience function for creating a Format
|
||||
// Fmt returns a Format type. This is a convenience function for creating a Format
|
||||
// type.
|
||||
func Fmt(str string, args ...interface{}) Format {
|
||||
return append(Format{str}, args...)
|
||||
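A one-line usage sketch for the Format/Fmt helper documented above; the key and values are illustrative:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "fmt-demo"})
	beans := 12
	// Fmt defers Printf-style formatting of the value to the logger.
	logger.Info("daily intake", "rate", hclog.Fmt("%d beans/day", beans))
}
```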
@ -89,6 +92,13 @@ const (
|
||||
ForceColor
|
||||
)
|
||||
|
||||
// SupportsColor is an optional interface that can be implemented by the output
|
||||
// value. If implemented and SupportsColor() returns true, then AutoColor will
|
||||
// enable colorization.
|
||||
type SupportsColor interface {
|
||||
SupportsColor() bool
|
||||
}
|
||||
|
||||
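A minimal sketch of an output value satisfying the SupportsColor interface above so that AutoColor keeps colorization on for a non-file writer; the buffer-backed type is purely illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-hclog"
)

// colorableBuffer is an io.Writer that reports ANSI color support to hclog.
type colorableBuffer struct {
	bytes.Buffer
}

func (*colorableBuffer) SupportsColor() bool { return true }

func main() {
	out := &colorableBuffer{}
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "color-probe", // illustrative
		Output: out,
		Color:  hclog.AutoColor, // stays on because out implements SupportsColor
	})
	logger.Error("something failed")
	fmt.Print(out.String())
}
```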
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
|
||||
// the level string is invalid. This facilitates setting the log level via
|
||||
// config or environment variable by name in a predictable way.
|
||||
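And a sketch of the level-by-name configuration that LevelFromString enables; the environment variable name is an assumption, not from this repository:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// "CSI_LOG_LEVEL" is an illustrative variable name.
	level := hclog.LevelFromString(os.Getenv("CSI_LOG_LEVEL"))
	if level == hclog.NoLevel {
		level = hclog.Info // fall back when the string is empty or invalid
	}
	logger := hclog.New(&hclog.LoggerOptions{Name: "env-config", Level: level})
	logger.Debug("only emitted when the configured level is debug or trace")
}
```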
@ -134,7 +144,7 @@ func (l Level) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// Logger describes the interface that must be implemeted by all loggers.
|
||||
// Logger describes the interface that must be implemented by all loggers.
|
||||
type Logger interface {
|
||||
// Args are alternating key, val pairs
|
||||
// keys must be strings
|
||||
@ -198,6 +208,9 @@ type Logger interface {
|
||||
// implementation cannot update the level on the fly, it should no-op.
|
||||
SetLevel(level Level)
|
||||
|
||||
// Returns the current level
|
||||
GetLevel() Level
|
||||
|
||||
// Return a value that conforms to the stdlib log.Logger interface
|
||||
StandardLogger(opts *StandardLoggerOptions) *log.Logger
|
||||
|
||||
@ -236,7 +249,7 @@ type LoggerOptions struct {
|
||||
// Name of the subsystem to prefix logs with
|
||||
Name string
|
||||
|
||||
// The threshold for the logger. Anything less severe is supressed
|
||||
// The threshold for the logger. Anything less severe is suppressed
|
||||
Level Level
|
||||
|
||||
// Where to write the logs to. Defaults to os.Stderr if nil
|
||||
@ -267,10 +280,17 @@ type LoggerOptions struct {
|
||||
// because setting TimeFormat to empty assumes the default format.
|
||||
DisableTime bool
|
||||
|
||||
// Color the output. On Windows, colored logs are only avaiable for io.Writers that
|
||||
// Color the output. On Windows, colored logs are only available for io.Writers that
|
||||
// are concretely instances of *os.File.
|
||||
Color ColorOption
|
||||
|
||||
// Only color the header, not the body. This can help with readability of long messages.
|
||||
ColorHeaderOnly bool
|
||||
|
||||
// Color the header and message body fields. This can help with readability
|
||||
// of long messages with multiple fields.
|
||||
ColorHeaderAndFields bool
|
||||
|
||||
// A function which is called with the log information and if it returns true the value
|
||||
// should not be logged.
|
||||
// This is useful when interacting with a system that you wish to suppress the log
|
||||
@ -279,9 +299,16 @@ type LoggerOptions struct {
|
||||
|
||||
// IndependentLevels causes subloggers to be created with an independent
|
||||
// copy of this logger's level. This means that using SetLevel on this
|
||||
// logger will not effect any subloggers, and SetLevel on any subloggers
|
||||
// will not effect the parent or sibling loggers.
|
||||
// logger will not affect any subloggers, and SetLevel on any subloggers
|
||||
// will not affect the parent or sibling loggers.
|
||||
IndependentLevels bool
|
||||
|
||||
// SubloggerHook registers a function that is called when a sublogger via
|
||||
// Named, With, or ResetNamed is created. If defined, the function is passed
|
||||
// the newly created Logger and the returned Logger is returned from the
|
||||
// original function. This option allows customization via interception and
|
||||
// wrapping of Logger instances.
|
||||
SubloggerHook func(sub Logger) Logger
|
||||
}
|
||||
|
||||
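To illustrate the IndependentLevels and SubloggerHook options documented in the struct above, a small sketch; the hook shown merely records sublogger names, which is one possible use rather than the upstream intent:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
)

func main() {
	var created []string // names of subloggers seen by the hook

	root := hclog.New(&hclog.LoggerOptions{
		Name:              "root",
		Level:             hclog.Info,
		IndependentLevels: true, // SetLevel on a sublogger won't touch root
		// SubloggerHook intercepts every sublogger made via Named, With,
		// or ResetNamed. Returning sub unchanged keeps the sketch simple;
		// real code could return a wrapping Logger instead.
		SubloggerHook: func(sub hclog.Logger) hclog.Logger {
			created = append(created, sub.Name())
			return sub
		},
	})

	rbd := root.Named("rbd")
	rbd.SetLevel(hclog.Debug)     // affects only this sublogger
	rbd.Debug("attaching volume") // printed
	root.Debug("suppressed")      // root still filters at Info

	fmt.Println("subloggers created:", created)
}
```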
// InterceptLogger describes the interface for using a logger
|
||||
|
5 vendor/github.com/hashicorp/go-hclog/nulllogger.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
@ -49,6 +52,8 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l }
|
||||
|
||||
func (l *nullLogger) SetLevel(level Level) {}
|
||||
|
||||
func (l *nullLogger) GetLevel() Level { return NoLevel }
|
||||
|
||||
func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
|
||||
return log.New(l.StandardWriter(opts), "", log.LstdFlags)
|
||||
}
|
||||
|
3 vendor/github.com/hashicorp/go-hclog/stdlog.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
|
3 vendor/github.com/hashicorp/go-hclog/writer.go (generated, vendored)
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package hclog
|
||||
|
||||
import (
|
||||
|
24 vendor/github.com/hashicorp/go-immutable-radix/.gitignore (generated, vendored, new file)
@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
23 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md (generated, vendored, new file)
@ -0,0 +1,23 @@
|
||||
# UNRELEASED
|
||||
|
||||
# 1.3.0 (September 17th, 2020)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
|
||||
|
||||
# 1.2.0 (March 18th, 2020)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Adds a `Clone` method to `Txn`, allowing a transaction to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
|
||||
|
||||
# 1.1.0 (May 22nd, 2019)
|
||||
|
||||
FEATURES
|
||||
|
||||
* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
|
||||
|
||||
# 1.0.0 (August 30th, 2018)
|
||||
|
||||
* go mod adopted
|
363 vendor/github.com/hashicorp/go-immutable-radix/LICENSE (generated, vendored, new file)
@ -0,0 +1,363 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
66 vendor/github.com/hashicorp/go-immutable-radix/README.md (generated, vendored, new file)
@ -0,0 +1,66 @@
|
||||
go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
|
||||
=========
|
||||
|
||||
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
|
||||
The package only provides a single `Tree` implementation, optimized for sparse nodes.
|
||||
|
||||
As a radix tree, it provides the following:
|
||||
* O(k) operations. In many cases, this can be faster than a hash table since
|
||||
the hash function is an O(k) operation, and hash tables have very poor cache locality.
|
||||
* Minimum / Maximum value lookups
|
||||
* Ordered iteration
|
||||
|
||||
A tree supports using a transaction to batch multiple updates (insert, delete)
|
||||
in a more efficient manner than performing each operation one at a time.
|
||||
|
||||
For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Below is a simple example of usage
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := iradix.New()
|
||||
r, _, _ = r.Insert([]byte("foo"), 1)
|
||||
r, _, _ = r.Insert([]byte("bar"), 2)
|
||||
r, _, _ = r.Insert([]byte("foobar"), 2)
|
||||
|
||||
// Find the longest prefix match
|
||||
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
|
||||
if string(m) != "foo" {
|
||||
panic("should be foo")
|
||||
}
|
||||
```
|
||||
|
||||
Here is an example of performing a range scan of the keys.
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := iradix.New()
|
||||
r, _, _ = r.Insert([]byte("001"), 1)
|
||||
r, _, _ = r.Insert([]byte("002"), 2)
|
||||
r, _, _ = r.Insert([]byte("005"), 5)
|
||||
r, _, _ = r.Insert([]byte("010"), 10)
|
||||
r, _, _ = r.Insert([]byte("100"), 10)
|
||||
|
||||
// Range scan over the keys that sort lexicographically between [003, 050)
|
||||
it := r.Root().Iterator()
|
||||
it.SeekLowerBound([]byte("003"))
|
||||
for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
	if string(key) >= "050" {
		break
	}
	fmt.Println(string(key))
}
|
||||
// Output:
|
||||
// 005
|
||||
// 010
|
||||
```
|
||||
|
21 vendor/github.com/hashicorp/go-immutable-radix/edges.go (generated, vendored, new file)
@ -0,0 +1,21 @@
|
||||
package iradix
|
||||
|
||||
import "sort"
|
||||
|
||||
type edges []edge
|
||||
|
||||
func (e edges) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e edges) Less(i, j int) bool {
|
||||
return e[i].label < e[j].label
|
||||
}
|
||||
|
||||
func (e edges) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e edges) Sort() {
|
||||
sort.Sort(e)
|
||||
}
|
676 vendor/github.com/hashicorp/go-immutable-radix/iradix.go (generated, vendored, new file)
@ -0,0 +1,676 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultModifiedCache is the default size of the modified node
|
||||
// cache used per transaction. This is used to cache the updates
|
||||
// to the nodes near the root, while the leaves do not need to be
|
||||
// cached. This is important for very large transactions to prevent
|
||||
// the modified cache from growing to be enormous. This is also used
|
||||
// to set the max size of the mutation notify maps since those should
|
||||
// also be bounded in a similar way.
|
||||
defaultModifiedCache = 8192
|
||||
)
|
||||
|
||||
// Tree implements an immutable radix tree. This can be treated as a
|
||||
// Dictionary abstract data type. The main advantage over a standard
|
||||
// hash map is prefix-based lookups and ordered iteration. The immutability
|
||||
// means that it is safe to concurrently read from a Tree without any
|
||||
// coordination.
|
||||
type Tree struct {
|
||||
root *Node
|
||||
size int
|
||||
}
|
||||
|
||||
// New returns an empty Tree
|
||||
func New() *Tree {
|
||||
t := &Tree{
|
||||
root: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
},
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Len is used to return the number of elements in the tree
|
||||
func (t *Tree) Len() int {
|
||||
return t.size
|
||||
}
|
||||
|
||||
// Txn is a transaction on the tree. This transaction is applied
|
||||
// atomically and returns a new tree when committed. A transaction
|
||||
// is not thread safe, and should only be used by a single goroutine.
|
||||
type Txn struct {
|
||||
// root is the modified root for the transaction.
|
||||
root *Node
|
||||
|
||||
// snap is a snapshot of the root node for use if we have to run the
|
||||
// slow notify algorithm.
|
||||
snap *Node
|
||||
|
||||
// size tracks the size of the tree as it is modified during the
|
||||
// transaction.
|
||||
size int
|
||||
|
||||
// writable is a cache of writable nodes that have been created during
|
||||
// the course of the transaction. This allows us to re-use the same
|
||||
// nodes for further writes and avoid unnecessary copies of nodes that
|
||||
// have never been exposed outside the transaction. This will only hold
|
||||
// up to defaultModifiedCache number of entries.
|
||||
writable *simplelru.LRU
|
||||
|
||||
// trackChannels is used to hold channels that need to be notified to
|
||||
// signal mutation of the tree. This will only hold up to
|
||||
// defaultModifiedCache number of entries, after which we will set the
|
||||
// trackOverflow flag, which will cause us to use a more expensive
|
||||
// algorithm to perform the notifications. Mutation tracking is only
|
||||
// performed if trackMutate is true.
|
||||
trackChannels map[chan struct{}]struct{}
|
||||
trackOverflow bool
|
||||
trackMutate bool
|
||||
}
|
||||
|
||||
// Txn starts a new transaction that can be used to mutate the tree
|
||||
func (t *Tree) Txn() *Txn {
|
||||
txn := &Txn{
|
||||
root: t.root,
|
||||
snap: t.root,
|
||||
size: t.size,
|
||||
}
|
||||
return txn
|
||||
}
|
||||
|
||||
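To complement the Txn documentation above, a short sketch of batching writes in one transaction and committing them into a new immutable tree; the keys and values are illustrative:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	base := iradix.New()

	// Batch several updates in one transaction; the original tree is
	// untouched until Commit returns the new root.
	txn := base.Txn()
	txn.Insert([]byte("rbd/vol-1"), 1)
	txn.Insert([]byte("rbd/vol-2"), 2)
	txn.Delete([]byte("rbd/vol-2"))
	updated := txn.Commit()

	fmt.Println(base.Len(), updated.Len()) // 0 1
}
```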
// Clone makes an independent copy of the transaction. The new transaction
// does not track any nodes and has TrackMutate turned off. The cloned
// transaction will contain any uncommitted writes in the original
// transaction, but further mutations to either will be independent and
// result in different radix trees on Commit. A cloned transaction may be
// passed to another goroutine and mutated there independently; however,
// each transaction may only be mutated in a single thread.
|
||||
func (t *Txn) Clone() *Txn {
|
||||
// reset the writable node cache to avoid leaking future writes into the clone
|
||||
t.writable = nil
|
||||
|
||||
txn := &Txn{
|
||||
root: t.root,
|
||||
snap: t.snap,
|
||||
size: t.size,
|
||||
}
|
||||
return txn
|
||||
}
|
||||
|
||||
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
|
||||
// then notifications will be issued for affected internal nodes and leaves when
|
||||
// the transaction is committed.
|
||||
func (t *Txn) TrackMutate(track bool) {
|
||||
t.trackMutate = track
|
||||
}
|
||||
|
||||
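A rough sketch of the mutation-tracking flow described above: take a watch channel, mutate in a tracked transaction, and observe the notification issued at Commit. The select-based check and key names are illustrative:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	tree := iradix.New()
	tree, _, _ = tree.Insert([]byte("config/replicas"), 3)

	// Grab a watch channel for the key via a read-only transaction.
	watch, _, _ := tree.Txn().GetWatch([]byte("config/replicas"))

	// Mutate the key in a tracked transaction; Commit calls Notify,
	// which closes the mutate channels of the touched nodes and leaves.
	txn := tree.Txn()
	txn.TrackMutate(true)
	txn.Insert([]byte("config/replicas"), 5)
	tree = txn.Commit()

	select {
	case <-watch:
		fmt.Println("key was modified, tree size:", tree.Len())
	default:
		fmt.Println("no notification")
	}
}
```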
// trackChannel safely attempts to track the given mutation channel, setting the
|
||||
// overflow flag if we can no longer track any more. This limits the amount of
|
||||
// state that will accumulate during a transaction and we have a slower algorithm
|
||||
// to switch to if we overflow.
|
||||
func (t *Txn) trackChannel(ch chan struct{}) {
|
||||
// In overflow, make sure we don't store any more objects.
|
||||
if t.trackOverflow {
|
||||
return
|
||||
}
|
||||
|
||||
// If this would overflow the state we reject it and set the flag (since
|
||||
// we aren't tracking everything that's required any longer).
|
||||
if len(t.trackChannels) >= defaultModifiedCache {
|
||||
// Mark that we are in the overflow state
|
||||
t.trackOverflow = true
|
||||
|
||||
// Clear the map so that the channels can be garbage collected. It is
|
||||
// safe to do this since we have already overflowed and will be using
|
||||
// the slow notify algorithm.
|
||||
t.trackChannels = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Create the map on the fly when we need it.
|
||||
if t.trackChannels == nil {
|
||||
t.trackChannels = make(map[chan struct{}]struct{})
|
||||
}
|
||||
|
||||
// Otherwise we are good to track it.
|
||||
t.trackChannels[ch] = struct{}{}
|
||||
}
|
||||
|
||||
// writeNode returns a node to be modified; if the current node has already been
|
||||
// modified during the course of the transaction, it is used in-place. Set
|
||||
// forLeafUpdate to true if you are getting a write node to update the leaf,
|
||||
// which will set leaf mutation tracking appropriately as well.
|
||||
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
|
||||
// Ensure the writable set exists.
|
||||
if t.writable == nil {
|
||||
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t.writable = lru
|
||||
}
|
||||
|
||||
// If this node has already been modified, we can continue to use it
|
||||
// during this transaction. We know that we don't need to track it for
|
||||
// a node update since the node is writable, but if this is for a leaf
|
||||
// update we track it, in case the initial write to this node didn't
|
||||
// update the leaf.
|
||||
if _, ok := t.writable.Get(n); ok {
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Copy the existing node. If you have set forLeafUpdate it will be
|
||||
// safe to replace this leaf with another after you get your node for
|
||||
// writing. You MUST replace it, because the channel associated with
|
||||
// this leaf will be closed when this transaction is committed.
|
||||
nc := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: n.leaf,
|
||||
}
|
||||
if n.prefix != nil {
|
||||
nc.prefix = make([]byte, len(n.prefix))
|
||||
copy(nc.prefix, n.prefix)
|
||||
}
|
||||
if len(n.edges) != 0 {
|
||||
nc.edges = make([]edge, len(n.edges))
|
||||
copy(nc.edges, n.edges)
|
||||
}
|
||||
|
||||
// Mark this node as writable.
|
||||
t.writable.Add(nc, nil)
|
||||
return nc
|
||||
}
|
||||
|
||||
// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
|
||||
// Returns the size of the subtree visited
|
||||
func (t *Txn) trackChannelsAndCount(n *Node) int {
|
||||
// Count only leaf nodes
|
||||
leaves := 0
|
||||
if n.leaf != nil {
|
||||
leaves = 1
|
||||
}
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
leaves += t.trackChannelsAndCount(e.node)
|
||||
}
|
||||
return leaves
|
||||
}
|
||||
|
||||
// mergeChild is called to collapse the given node with its child. This is only
|
||||
// called when the given node is not a leaf and has a single edge.
|
||||
func (t *Txn) mergeChild(n *Node) {
|
||||
// Mark the child node as being mutated since we are about to abandon
|
||||
// it. We don't need to mark the leaf since we are retaining it if it
|
||||
// is there.
|
||||
e := n.edges[0]
|
||||
child := e.node
|
||||
if t.trackMutate {
|
||||
t.trackChannel(child.mutateCh)
|
||||
}
|
||||
|
||||
// Merge the nodes.
|
||||
n.prefix = concat(n.prefix, child.prefix)
|
||||
n.leaf = child.leaf
|
||||
if len(child.edges) != 0 {
|
||||
n.edges = make([]edge, len(child.edges))
|
||||
copy(n.edges, child.edges)
|
||||
} else {
|
||||
n.edges = nil
|
||||
}
|
||||
}
|
||||
|
||||
// insert does a recursive insertion
|
||||
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
|
||||
// Handle key exhaustion
|
||||
if len(search) == 0 {
|
||||
var oldVal interface{}
|
||||
didUpdate := false
|
||||
if n.isLeaf() {
|
||||
oldVal = n.leaf.val
|
||||
didUpdate = true
|
||||
}
|
||||
|
||||
nc := t.writeNode(n, true)
|
||||
nc.leaf = &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Look for the edge
|
||||
idx, child := n.getEdge(search[0])
|
||||
|
||||
// No edge, create one
|
||||
if child == nil {
|
||||
e := edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
},
|
||||
prefix: search,
|
||||
},
|
||||
}
|
||||
nc := t.writeNode(n, false)
|
||||
nc.addEdge(e)
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Determine longest prefix of the search key on match
|
||||
commonPrefix := longestPrefix(search, child.prefix)
|
||||
if commonPrefix == len(child.prefix) {
|
||||
search = search[commonPrefix:]
|
||||
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
|
||||
if newChild != nil {
|
||||
nc := t.writeNode(n, false)
|
||||
nc.edges[idx].node = newChild
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
return nil, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Split the node
|
||||
nc := t.writeNode(n, false)
|
||||
splitNode := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
nc.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: splitNode,
|
||||
})
|
||||
|
||||
// Restore the existing child node
|
||||
modChild := t.writeNode(child, false)
|
||||
splitNode.addEdge(edge{
|
||||
label: modChild.prefix[commonPrefix],
|
||||
node: modChild,
|
||||
})
|
||||
modChild.prefix = modChild.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add it to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
splitNode.leaf = leaf
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
splitNode.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// delete does a recursive deletion
|
||||
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if !n.isLeaf() {
|
||||
return nil, nil
|
||||
}
|
||||
// Copy the pointer in case we are in a transaction that already
|
||||
// modified this node since the node will be reused. Any changes
|
||||
// made to the node will not affect returning the original leaf
|
||||
// value.
|
||||
oldLeaf := n.leaf
|
||||
|
||||
// Remove the leaf node
|
||||
nc := t.writeNode(n, true)
|
||||
nc.leaf = nil
|
||||
|
||||
// Check if this node should be merged
|
||||
if n != t.root && len(nc.edges) == 1 {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
return nc, oldLeaf
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
label := search[0]
|
||||
idx, child := n.getEdge(label)
|
||||
if child == nil || !bytes.HasPrefix(search, child.prefix) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
search = search[len(child.prefix):]
|
||||
newChild, leaf := t.delete(n, child, search)
|
||||
if newChild == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
|
||||
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
|
||||
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
|
||||
// so be careful if you change any of the logic here.
|
||||
nc := t.writeNode(n, false)
|
||||
|
||||
// Delete the edge if the node has no edges
|
||||
if newChild.leaf == nil && len(newChild.edges) == 0 {
|
||||
nc.delEdge(label)
|
||||
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
} else {
|
||||
nc.edges[idx].node = newChild
|
||||
}
|
||||
return nc, leaf
|
||||
}
|
||||
|
||||
// deletePrefix does a recursive deletion of everything under a prefix
|
||||
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
nc := t.writeNode(n, true)
|
||||
if n.isLeaf() {
|
||||
nc.leaf = nil
|
||||
}
|
||||
nc.edges = nil
|
||||
return nc, t.trackChannelsAndCount(n)
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
label := search[0]
|
||||
idx, child := n.getEdge(label)
|
||||
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
|
||||
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
|
||||
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if len(child.prefix) > len(search) {
|
||||
search = []byte("")
|
||||
} else {
|
||||
search = search[len(child.prefix):]
|
||||
}
|
||||
newChild, numDeletions := t.deletePrefix(n, child, search)
|
||||
if newChild == nil {
|
||||
return nil, 0
|
||||
}
|
||||
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
|
||||
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
|
||||
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
|
||||
// so be careful if you change any of the logic here.
|
||||
|
||||
nc := t.writeNode(n, false)
|
||||
|
||||
// Delete the edge if the node has no edges
|
||||
if newChild.leaf == nil && len(newChild.edges) == 0 {
|
||||
nc.delEdge(label)
|
||||
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
|
||||
t.mergeChild(nc)
|
||||
}
|
||||
} else {
|
||||
nc.edges[idx].node = newChild
|
||||
}
|
||||
return nc, numDeletions
|
||||
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the previous value and a bool indicating if any was set.
|
||||
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
|
||||
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if !didUpdate {
|
||||
t.size++
|
||||
}
|
||||
return oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the old value if any,
|
||||
// and a bool indicating if the key was set.
|
||||
func (t *Txn) Delete(k []byte) (interface{}, bool) {
|
||||
newRoot, leaf := t.delete(nil, t.root, k)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if leaf != nil {
|
||||
t.size--
|
||||
return leaf.val, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete an entire subtree that matches the prefix
|
||||
// This will delete all nodes under that prefix
|
||||
func (t *Txn) DeletePrefix(prefix []byte) bool {
|
||||
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
t.size = t.size - numDeletions
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
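A brief sketch of DeletePrefix pruning a whole subtree in one transactional operation; the keys are illustrative:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	tree := iradix.New()
	for _, k := range []string{"snap/a", "snap/b", "vol/a"} {
		tree, _, _ = tree.Insert([]byte(k), true)
	}

	// Drop every key under "snap/" and commit the pruned tree.
	txn := tree.Txn()
	txn.DeletePrefix([]byte("snap/"))
	tree = txn.Commit()

	fmt.Println(tree.Len()) // 1: only "vol/a" remains
}
```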
|
||||
// Root returns the current root of the radix tree within this
|
||||
// transaction. The root is not safe across insert and delete operations,
|
||||
// but can be used to read the current state during a transaction.
|
||||
func (t *Txn) Root() *Node {
|
||||
return t.root
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Txn) Get(k []byte) (interface{}, bool) {
|
||||
return t.root.Get(k)
|
||||
}
|
||||
|
||||
// GetWatch is used to lookup a specific key, returning
|
||||
// the watch channel, value and if it was found
|
||||
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
|
||||
return t.root.GetWatch(k)
|
||||
}
|
||||
|
||||
// Commit is used to finalize the transaction and return a new tree. If mutation
|
||||
// tracking is turned on then notifications will also be issued.
|
||||
func (t *Txn) Commit() *Tree {
|
||||
nt := t.CommitOnly()
|
||||
if t.trackMutate {
|
||||
t.Notify()
|
||||
}
|
||||
return nt
|
||||
}
|
||||
|
||||
// CommitOnly is used to finalize the transaction and return a new tree, but
|
||||
// does not issue any notifications until Notify is called.
|
||||
func (t *Txn) CommitOnly() *Tree {
|
||||
nt := &Tree{t.root, t.size}
|
||||
t.writable = nil
|
||||
return nt
|
||||
}
|
||||
|
||||
// slowNotify does a complete comparison of the before and after trees in order
|
||||
// to trigger notifications. This doesn't require any additional state but it
|
||||
// is very expensive to compute.
|
||||
func (t *Txn) slowNotify() {
|
||||
snapIter := t.snap.rawIterator()
|
||||
rootIter := t.root.rawIterator()
|
||||
for snapIter.Front() != nil || rootIter.Front() != nil {
|
||||
// If we've exhausted the nodes in the old snapshot, we know
|
||||
// there's nothing remaining to notify.
|
||||
if snapIter.Front() == nil {
|
||||
return
|
||||
}
|
||||
snapElem := snapIter.Front()
|
||||
|
||||
// If we've exhausted the nodes in the new root, we know we need
|
||||
// to invalidate everything that remains in the old snapshot. We
|
||||
// know from the loop condition there's something in the old
|
||||
// snapshot.
|
||||
if rootIter.Front() == nil {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.isLeaf() {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
snapIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// Do one string compare so we can check the various conditions
|
||||
// below without repeating the compare.
|
||||
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
|
||||
|
||||
// If the snapshot is behind the root, then we must have deleted
|
||||
// this node during the transaction.
|
||||
if cmp < 0 {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.isLeaf() {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
snapIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// If the snapshot is ahead of the root, then we must have added
|
||||
// this node during the transaction.
|
||||
if cmp > 0 {
|
||||
rootIter.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
// If we have the same path, then we need to see if we mutated a
|
||||
// node and possibly the leaf.
|
||||
rootElem := rootIter.Front()
|
||||
if snapElem != rootElem {
|
||||
close(snapElem.mutateCh)
|
||||
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
|
||||
close(snapElem.leaf.mutateCh)
|
||||
}
|
||||
}
|
||||
snapIter.Next()
|
||||
rootIter.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is used along with TrackMutate to trigger notifications. This must
|
||||
// only be done once a transaction is committed via CommitOnly, and it is called
|
||||
// automatically by Commit.
|
||||
func (t *Txn) Notify() {
|
||||
if !t.trackMutate {
|
||||
return
|
||||
}
|
||||
|
||||
// If we've overflowed the tracking state we can't use it in any way and
|
||||
// need to do a full tree compare.
|
||||
if t.trackOverflow {
|
||||
t.slowNotify()
|
||||
} else {
|
||||
for ch := range t.trackChannels {
|
||||
close(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up the tracking state so that a re-notify is safe (will trigger
|
||||
// the else clause above which will be a no-op).
|
||||
t.trackChannels = nil
|
||||
t.trackOverflow = false
|
||||
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the new tree, previous value and a bool indicating if any was set.
|
||||
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Insert(k, v)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the new tree,
|
||||
// old value if any, and a bool indicating if the key was set.
|
||||
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Delete(k)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
|
||||
// and a bool indicating if the prefix matched any nodes
|
||||
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
|
||||
txn := t.Txn()
|
||||
ok := txn.DeletePrefix(k)
|
||||
return txn.Commit(), ok
|
||||
}
|
||||
|
||||
// Root returns the root node of the tree which can be used for richer
|
||||
// query operations.
|
||||
func (t *Tree) Root() *Node {
|
||||
return t.root
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Tree) Get(k []byte) (interface{}, bool) {
|
||||
return t.root.Get(k)
|
||||
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
|
||||
// of two strings
|
||||
func longestPrefix(k1, k2 []byte) int {
|
||||
max := len(k1)
|
||||
if l := len(k2); l < max {
|
||||
max = l
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < max; i++ {
|
||||
if k1[i] != k2[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// concat two byte slices, returning a third new copy
|
||||
func concat(a, b []byte) []byte {
|
||||
c := make([]byte, len(a)+len(b))
|
||||
copy(c, a)
|
||||
copy(c[len(a):], b)
|
||||
return c
|
||||
}
|
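The exported Txn and Tree methods above make up the package's public API. As a point of reference (not part of the vendored code), a minimal usage sketch could look like the following; it assumes the package is imported from its canonical github.com/hashicorp/go-immutable-radix path and that the iradix.New() constructor, which lives in a file not shown in this diff, is available.

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	// Every mutation returns a new immutable tree; earlier copies stay valid.
	r := iradix.New()
	r, _, _ = r.Insert([]byte("foo"), 1)
	r, _, _ = r.Insert([]byte("foobar"), 2)
	r, _, _ = r.Insert([]byte("zip"), 3)

	if v, ok := r.Get([]byte("foo")); ok {
		fmt.Println("foo =", v) // foo = 1
	}

	// DeletePrefix removes the whole "foo" subtree in a single transaction.
	r, _ = r.DeletePrefix([]byte("foo"))
	if _, ok := r.Get([]byte("foobar")); !ok {
		fmt.Println("foobar was removed with the prefix")
	}
}
```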
205
vendor/github.com/hashicorp/go-immutable-radix/iter.go
generated
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// Iterator is used to iterate over a set of nodes
|
||||
// in pre-order
|
||||
type Iterator struct {
|
||||
node *Node
|
||||
stack []edges
|
||||
}
|
||||
|
||||
// SeekPrefixWatch is used to seek the iterator to a given prefix
|
||||
// and returns the watch channel of the finest granularity
|
||||
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
|
||||
// Wipe the stack
|
||||
i.stack = nil
|
||||
n := i.node
|
||||
watch = n.mutateCh
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
i.node = n
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Update to the finest granularity as the search makes progress
|
||||
watch = n.mutateCh
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if bytes.HasPrefix(n.prefix, search) {
|
||||
i.node = n
|
||||
return
|
||||
} else {
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekPrefix is used to seek the iterator to a given prefix
|
||||
func (i *Iterator) SeekPrefix(prefix []byte) {
|
||||
i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
func (i *Iterator) recurseMin(n *Node) *Node {
|
||||
// Traverse to the minimum child
|
||||
if n.leaf != nil {
|
||||
return n
|
||||
}
|
||||
nEdges := len(n.edges)
|
||||
if nEdges > 1 {
|
||||
// Add all the other edges to the stack (the min node will be added as
|
||||
// we recurse)
|
||||
i.stack = append(i.stack, n.edges[1:])
|
||||
}
|
||||
if nEdges > 0 {
|
||||
return i.recurseMin(n.edges[0].node)
|
||||
}
|
||||
// Shouldn't be possible
|
||||
return nil
|
||||
}
|
||||
|
||||
// SeekLowerBound is used to seek the iterator to the smallest key that is
|
||||
// greater or equal to the given key. There is no watch variant as it's hard to
|
||||
// predict based on the radix structure which node(s) changes might affect the
|
||||
// result.
|
||||
func (i *Iterator) SeekLowerBound(key []byte) {
|
||||
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
|
||||
// go because we need only a subset of edges of many nodes in the path to the
|
||||
// leaf with the lower bound. Note that the iterator will still recurse into
|
||||
// children that we don't traverse on the way to the reverse lower bound as it
|
||||
// walks the stack.
|
||||
i.stack = []edges{}
|
||||
// i.node starts off in the common case as pointing to the root node of the
|
||||
// tree. By the time we return we have either found a lower bound and setup
|
||||
// the stack to traverse all larger keys, or we have not and the stack and
|
||||
// node should both be nil to prevent the iterator from assuming it is just
|
||||
// iterating the whole tree from the root node. Either way this needs to end
|
||||
// up as nil so just set it here.
|
||||
n := i.node
|
||||
i.node = nil
|
||||
search := key
|
||||
|
||||
found := func(n *Node) {
|
||||
i.stack = append(i.stack, edges{edge{node: n}})
|
||||
}
|
||||
|
||||
findMin := func(n *Node) {
|
||||
n = i.recurseMin(n)
|
||||
if n != nil {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
// Compare current prefix with the search key's same-length prefix.
|
||||
var prefixCmp int
|
||||
if len(n.prefix) < len(search) {
|
||||
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
|
||||
} else {
|
||||
prefixCmp = bytes.Compare(n.prefix, search)
|
||||
}
|
||||
|
||||
if prefixCmp > 0 {
|
||||
// Prefix is larger, that means the lower bound is greater than the search
|
||||
// and from now on we need to follow the minimum path to the smallest
|
||||
// leaf under this subtree.
|
||||
findMin(n)
|
||||
return
|
||||
}
|
||||
|
||||
if prefixCmp < 0 {
|
||||
// Prefix is smaller than search prefix, that means there is no lower
|
||||
// bound
|
||||
i.node = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Prefix is equal, we are still heading for an exact match. If this is a
|
||||
// leaf and an exact match we're done.
|
||||
if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix if the current node has one. Note that this is
|
||||
// safe because if n.prefix is longer than the search slice prefixCmp would
|
||||
// have been > 0 above and the method would have already returned.
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
if len(search) == 0 {
|
||||
// We've exhausted the search key, but the current node is not an exact
|
||||
// match or not a leaf. That means the leaf value, if it exists, and
|
||||
// all child nodes must be strictly greater, so the smallest key in this
|
||||
// subtree must be the lower bound.
|
||||
findMin(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, take the lower bound next edge.
|
||||
idx, lbNode := n.getLowerBoundEdge(search[0])
|
||||
if lbNode == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Create stack edges for all the strictly higher edges in this node.
|
||||
if idx+1 < len(n.edges) {
|
||||
i.stack = append(i.stack, n.edges[idx+1:])
|
||||
}
|
||||
|
||||
// Recurse
|
||||
n = lbNode
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next node in order
|
||||
func (i *Iterator) Next() ([]byte, interface{}, bool) {
|
||||
// Initialize our stack if needed
|
||||
if i.stack == nil && i.node != nil {
|
||||
i.stack = []edges{
|
||||
{
|
||||
edge{node: i.node},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for len(i.stack) > 0 {
|
||||
// Inspect the last element of the stack
|
||||
n := len(i.stack)
|
||||
last := i.stack[n-1]
|
||||
elem := last[0].node
|
||||
|
||||
// Update the stack
|
||||
if len(last) > 1 {
|
||||
i.stack[n-1] = last[1:]
|
||||
} else {
|
||||
i.stack = i.stack[:n-1]
|
||||
}
|
||||
|
||||
// Push the edges onto the frontier
|
||||
if len(elem.edges) > 0 {
|
||||
i.stack = append(i.stack, elem.edges)
|
||||
}
|
||||
|
||||
// Return the leaf values if any
|
||||
if elem.leaf != nil {
|
||||
return elem.leaf.key, elem.leaf.val, true
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
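The Iterator's SeekLowerBound, as described above, positions the iterator at the smallest key greater than or equal to the given key. A short editorial sketch (not part of the vendored code, and again assuming iradix.New() from the rest of the package):

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"foo", "foobar", "zip"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// In-order iteration over every key >= "fop" (here only "zip" qualifies).
	it := r.Root().Iterator()
	it.SeekLowerBound([]byte("fop"))
	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
		fmt.Printf("%s => %v\n", k, v)
	}
}
```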
334
vendor/github.com/hashicorp/go-immutable-radix/node.go
generated
vendored
Normal file
@ -0,0 +1,334 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
|
||||
// key and value, returning if iteration should
|
||||
// be terminated.
|
||||
type WalkFn func(k []byte, v interface{}) bool
|
||||
|
||||
// leafNode is used to represent a value
|
||||
type leafNode struct {
|
||||
mutateCh chan struct{}
|
||||
key []byte
|
||||
val interface{}
|
||||
}
|
||||
|
||||
// edge is used to represent an edge node
|
||||
type edge struct {
|
||||
label byte
|
||||
node *Node
|
||||
}
|
||||
|
||||
// Node is an immutable node in the radix tree
|
||||
type Node struct {
|
||||
// mutateCh is closed if this node is modified
|
||||
mutateCh chan struct{}
|
||||
|
||||
// leaf is used to store possible leaf
|
||||
leaf *leafNode
|
||||
|
||||
// prefix is the common prefix we ignore
|
||||
prefix []byte
|
||||
|
||||
// Edges should be stored in-order for iteration.
|
||||
// We avoid a fully materialized slice to save memory,
|
||||
// since in most cases we expect to be sparse
|
||||
edges edges
|
||||
}
|
||||
|
||||
func (n *Node) isLeaf() bool {
|
||||
return n.leaf != nil
|
||||
}
|
||||
|
||||
func (n *Node) addEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
n.edges = append(n.edges, e)
|
||||
if idx != num {
|
||||
copy(n.edges[idx+1:], n.edges[idx:num])
|
||||
n.edges[idx] = e
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) replaceEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == e.label {
|
||||
n.edges[idx].node = e.node
|
||||
return
|
||||
}
|
||||
panic("replacing missing edge")
|
||||
}
|
||||
|
||||
func (n *Node) getEdge(label byte) (int, *Node) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
return idx, n.edges[idx].node
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
// we want lower bound behavior so return even if it's not an exact match
|
||||
if idx < num {
|
||||
return idx, n.edges[idx].node
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (n *Node) delEdge(label byte) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
copy(n.edges[idx:], n.edges[idx+1:])
|
||||
n.edges[len(n.edges)-1] = edge{}
|
||||
n.edges = n.edges[:len(n.edges)-1]
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
|
||||
search := k
|
||||
watch := n.mutateCh
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.mutateCh, n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Update to the finest granularity as the search makes progress
|
||||
watch = n.mutateCh
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return watch, nil, false
|
||||
}
|
||||
|
||||
func (n *Node) Get(k []byte) (interface{}, bool) {
|
||||
_, val, ok := n.GetWatch(k)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// LongestPrefix is like Get, but instead of an
|
||||
// exact match, it will return the longest prefix match.
|
||||
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
|
||||
var last *leafNode
|
||||
search := k
|
||||
for {
|
||||
// Look for a leaf node
|
||||
if n.isLeaf() {
|
||||
last = n.leaf
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if last != nil {
|
||||
return last.key, last.val, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Minimum is used to return the minimum value in the tree
|
||||
func (n *Node) Minimum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
if len(n.edges) > 0 {
|
||||
n = n.edges[0].node
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Maximum is used to return the maximum value in the tree
|
||||
func (n *Node) Maximum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if num := len(n.edges); num > 0 {
|
||||
n = n.edges[num-1].node
|
||||
continue
|
||||
}
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Iterator is used to return an iterator at
|
||||
// the given node to walk the tree
|
||||
func (n *Node) Iterator() *Iterator {
|
||||
return &Iterator{node: n}
|
||||
}
|
||||
|
||||
// ReverseIterator is used to return an iterator at
|
||||
// the given node to walk the tree backwards
|
||||
func (n *Node) ReverseIterator() *ReverseIterator {
|
||||
return NewReverseIterator(n)
|
||||
}
|
||||
|
||||
// rawIterator is used to return a raw iterator at the given node to walk the
|
||||
// tree.
|
||||
func (n *Node) rawIterator() *rawIterator {
|
||||
iter := &rawIterator{node: n}
|
||||
iter.Next()
|
||||
return iter
|
||||
}
|
||||
|
||||
// Walk is used to walk the tree
|
||||
func (n *Node) Walk(fn WalkFn) {
|
||||
recursiveWalk(n, fn)
|
||||
}
|
||||
|
||||
// WalkBackwards is used to walk the tree in reverse order
|
||||
func (n *Node) WalkBackwards(fn WalkFn) {
|
||||
reverseRecursiveWalk(n, fn)
|
||||
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix
|
||||
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if bytes.HasPrefix(n.prefix, search) {
|
||||
// Child may be under our search prefix
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
|
||||
// from the root down to a given leaf. Where WalkPrefix walks
|
||||
// all the entries *under* the given prefix, this walks the
|
||||
// entries *above* the given prefix.
|
||||
func (n *Node) WalkPath(path []byte, fn WalkFn) {
|
||||
search := path
|
||||
for {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
_, n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if bytes.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk(n *Node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
if recursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// reverseRecursiveWalk is used to do a reverse pre-order
|
||||
// walk of a node recursively. Returns true if the walk
|
||||
// should be aborted
|
||||
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children in reverse order
|
||||
for i := len(n.edges) - 1; i >= 0; i-- {
|
||||
e := n.edges[i]
|
||||
if reverseRecursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
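The Walk helpers above (Walk, WalkPrefix, WalkPath) are the callback-based alternative to the iterators. A brief sketch of WalkPrefix, not part of the vendored code and assuming iradix.New() as before:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"foo", "foobar", "zip"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// Visit only the entries under the "foo" prefix; returning true from the
	// WalkFn would stop the walk early.
	r.Root().WalkPrefix([]byte("foo"), func(k []byte, v interface{}) bool {
		fmt.Printf("%s => %v\n", k, v)
		return false
	})
}
```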
78
vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package iradix
|
||||
|
||||
// rawIterator visits each of the nodes in the tree, even the ones that are not
|
||||
// leaves. It keeps track of the effective path (what a leaf at a given node
|
||||
// would be called), which is useful for comparing trees.
|
||||
type rawIterator struct {
|
||||
// node is the starting node in the tree for the iterator.
|
||||
node *Node
|
||||
|
||||
// stack keeps track of edges in the frontier.
|
||||
stack []rawStackEntry
|
||||
|
||||
// pos is the current position of the iterator.
|
||||
pos *Node
|
||||
|
||||
// path is the effective path of the current iterator position,
|
||||
// regardless of whether the current node is a leaf.
|
||||
path string
|
||||
}
|
||||
|
||||
// rawStackEntry is used to keep track of the cumulative common path as well as
|
||||
// its associated edges in the frontier.
|
||||
type rawStackEntry struct {
|
||||
path string
|
||||
edges edges
|
||||
}
|
||||
|
||||
// Front returns the current node that has been iterated to.
|
||||
func (i *rawIterator) Front() *Node {
|
||||
return i.pos
|
||||
}
|
||||
|
||||
// Path returns the effective path of the current node, even if it's not actually
|
||||
// a leaf.
|
||||
func (i *rawIterator) Path() string {
|
||||
return i.path
|
||||
}
|
||||
|
||||
// Next advances the iterator to the next node.
|
||||
func (i *rawIterator) Next() {
|
||||
// Initialize our stack if needed.
|
||||
if i.stack == nil && i.node != nil {
|
||||
i.stack = []rawStackEntry{
|
||||
{
|
||||
edges: edges{
|
||||
edge{node: i.node},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for len(i.stack) > 0 {
|
||||
// Inspect the last element of the stack.
|
||||
n := len(i.stack)
|
||||
last := i.stack[n-1]
|
||||
elem := last.edges[0].node
|
||||
|
||||
// Update the stack.
|
||||
if len(last.edges) > 1 {
|
||||
i.stack[n-1].edges = last.edges[1:]
|
||||
} else {
|
||||
i.stack = i.stack[:n-1]
|
||||
}
|
||||
|
||||
// Push the edges onto the frontier.
|
||||
if len(elem.edges) > 0 {
|
||||
path := last.path + string(elem.prefix)
|
||||
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
|
||||
}
|
||||
|
||||
i.pos = elem
|
||||
i.path = last.path + string(elem.prefix)
|
||||
return
|
||||
}
|
||||
|
||||
i.pos = nil
|
||||
i.path = ""
|
||||
}
|
239
vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// ReverseIterator is used to iterate over a set of nodes
|
||||
// in reverse in-order
|
||||
type ReverseIterator struct {
|
||||
i *Iterator
|
||||
|
||||
// expandedParents stores the set of parent nodes whose relevant children have
|
||||
// already been pushed into the stack. This can happen during seek or during
|
||||
// iteration.
|
||||
//
|
||||
// Unlike forward iteration we need to recurse into children before we can
|
||||
// output the value stored in an internal leaf since all children are greater.
|
||||
// We use this to track whether we have already ensured all the children are
|
||||
// in the stack.
|
||||
expandedParents map[*Node]struct{}
|
||||
}
|
||||
|
||||
// NewReverseIterator returns a new ReverseIterator at a node
|
||||
func NewReverseIterator(n *Node) *ReverseIterator {
|
||||
return &ReverseIterator{
|
||||
i: &Iterator{node: n},
|
||||
}
|
||||
}
|
||||
|
||||
// SeekPrefixWatch is used to seek the iterator to a given prefix
|
||||
// and returns the watch channel of the finest granularity
|
||||
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
|
||||
return ri.i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
// SeekPrefix is used to seek the iterator to a given prefix
|
||||
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
|
||||
ri.i.SeekPrefixWatch(prefix)
|
||||
}
|
||||
|
||||
// SeekReverseLowerBound is used to seek the iterator to the largest key that is
|
||||
// lower or equal to the given key. There is no watch variant as it's hard to
|
||||
// predict based on the radix structure which node(s) changes might affect the
|
||||
// result.
|
||||
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
|
||||
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
|
||||
// go because we need only a subset of edges of many nodes in the path to the
|
||||
// leaf with the lower bound. Note that the iterator will still recurse into
|
||||
// children that we don't traverse on the way to the reverse lower bound as it
|
||||
// walks the stack.
|
||||
ri.i.stack = []edges{}
|
||||
// ri.i.node starts off in the common case as pointing to the root node of the
|
||||
// tree. By the time we return we have either found a lower bound and setup
|
||||
// the stack to traverse all larger keys, or we have not and the stack and
|
||||
// node should both be nil to prevent the iterator from assuming it is just
|
||||
// iterating the whole tree from the root node. Either way this needs to end
|
||||
// up as nil so just set it here.
|
||||
n := ri.i.node
|
||||
ri.i.node = nil
|
||||
search := key
|
||||
|
||||
if ri.expandedParents == nil {
|
||||
ri.expandedParents = make(map[*Node]struct{})
|
||||
}
|
||||
|
||||
found := func(n *Node) {
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
// We need to mark this node as expanded in advance too otherwise the
|
||||
// iterator will attempt to walk all of its children even though they are
|
||||
// greater than the lower bound we have found. We've expanded it in the
|
||||
// sense that all of its children that we want to walk are already in the
|
||||
// stack (i.e. none of them).
|
||||
ri.expandedParents[n] = struct{}{}
|
||||
}
|
||||
|
||||
for {
|
||||
// Compare current prefix with the search key's same-length prefix.
|
||||
var prefixCmp int
|
||||
if len(n.prefix) < len(search) {
|
||||
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
|
||||
} else {
|
||||
prefixCmp = bytes.Compare(n.prefix, search)
|
||||
}
|
||||
|
||||
if prefixCmp < 0 {
|
||||
// Prefix is smaller than search prefix, that means there is no exact
|
||||
// match for the search key. But we are looking in reverse, so the reverse
|
||||
// lower bound will be the largest leaf under this subtree, since it is
|
||||
// the value that would come right before the current search key if it
|
||||
// were in the tree. So we need to follow the maximum path in this subtree
|
||||
// to find it. Note that this is exactly what the iterator will already do
|
||||
// if it finds a node in the stack that has _not_ been marked as expanded
|
||||
// so in this one case we don't call `found` and instead let the iterator
|
||||
// do the expansion and recursion through all the children.
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
return
|
||||
}
|
||||
|
||||
if prefixCmp > 0 {
|
||||
// Prefix is larger than search prefix, or there is no prefix but we've
|
||||
// also exhausted the search key. Either way, that means there is no
|
||||
// reverse lower bound since nothing comes before our current search
|
||||
// prefix.
|
||||
return
|
||||
}
|
||||
|
||||
// If this is a leaf, something needs to happen! Note that if it's a leaf
|
||||
// and prefixCmp was zero (which it must be to get here) then the leaf value
|
||||
// is either an exact match for the search, or it's lower. It can't be
|
||||
// greater.
|
||||
if n.isLeaf() {
|
||||
|
||||
// Firstly, if it's an exact match, we're done!
|
||||
if bytes.Equal(n.leaf.key, key) {
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// It's not so this node's leaf value must be lower and could still be a
|
||||
// valid contender for reverse lower bound.
|
||||
|
||||
// If it has no children then we are also done.
|
||||
if len(n.edges) == 0 {
|
||||
// This leaf is the lower bound.
|
||||
found(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Finally, this leaf is internal (has children) so we'll keep searching,
|
||||
// but we need to add it to the iterator's stack since it has a leaf value
|
||||
// that needs to be iterated over. It needs to be added to the stack
|
||||
// before its children below as it comes first.
|
||||
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
|
||||
// We also need to mark it as expanded since we'll be adding any of its
|
||||
// relevant children below and so don't want the iterator to re-add them
|
||||
// on its way back up the stack.
|
||||
ri.expandedParents[n] = struct{}{}
|
||||
}
|
||||
|
||||
// Consume the search prefix. Note that this is safe because if n.prefix is
|
||||
// longer than the search slice prefixCmp would have been > 0 above and the
|
||||
// method would have already returned.
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
if len(search) == 0 {
|
||||
// We've exhausted the search key but we are not at a leaf. That means all
|
||||
// children are greater than the search key so a reverse lower bound
|
||||
// doesn't exist in this subtree. Note that there might still be one in
|
||||
// the whole radix tree by following a different path somewhere further
|
||||
// up. If that's the case then the iterator's stack will contain all the
|
||||
// smaller nodes already and Previous will walk through them correctly.
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, take the lower bound next edge.
|
||||
idx, lbNode := n.getLowerBoundEdge(search[0])
|
||||
|
||||
// From here, we need to update the stack with all values lower than
|
||||
// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
|
||||
// search prefix is larger than all edges, we need to place idx at the
|
||||
// last edge index so they can all be placed in the stack, since they
|
||||
// come before our search prefix.
|
||||
if idx == -1 {
|
||||
idx = len(n.edges)
|
||||
}
|
||||
|
||||
// Create stack edges for all the strictly lower edges in this node.
|
||||
if len(n.edges[:idx]) > 0 {
|
||||
ri.i.stack = append(ri.i.stack, n.edges[:idx])
|
||||
}
|
||||
|
||||
// Exit if there's no lower bound edge. The stack will have the previous
|
||||
// nodes already.
|
||||
if lbNode == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Recurse
|
||||
n = lbNode
|
||||
}
|
||||
}
|
||||
|
||||
// Previous returns the previous node in reverse order
|
||||
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
|
||||
// Initialize our stack if needed
|
||||
if ri.i.stack == nil && ri.i.node != nil {
|
||||
ri.i.stack = []edges{
|
||||
{
|
||||
edge{node: ri.i.node},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if ri.expandedParents == nil {
|
||||
ri.expandedParents = make(map[*Node]struct{})
|
||||
}
|
||||
|
||||
for len(ri.i.stack) > 0 {
|
||||
// Inspect the last element of the stack
|
||||
n := len(ri.i.stack)
|
||||
last := ri.i.stack[n-1]
|
||||
m := len(last)
|
||||
elem := last[m-1].node
|
||||
|
||||
_, alreadyExpanded := ri.expandedParents[elem]
|
||||
|
||||
// If this is an internal node and we've not seen it already, we need to
|
||||
// leave it in the stack so we can return its possible leaf value _after_
|
||||
// we've recursed through all its children.
|
||||
if len(elem.edges) > 0 && !alreadyExpanded {
|
||||
// record that we've seen this node!
|
||||
ri.expandedParents[elem] = struct{}{}
|
||||
// push child edges onto stack and skip the rest of the loop to recurse
|
||||
// into the largest one.
|
||||
ri.i.stack = append(ri.i.stack, elem.edges)
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove the node from the stack
|
||||
if m > 1 {
|
||||
ri.i.stack[n-1] = last[:m-1]
|
||||
} else {
|
||||
ri.i.stack = ri.i.stack[:n-1]
|
||||
}
|
||||
// We don't need this state any more as it's no longer in the stack so we
|
||||
// won't visit it again
|
||||
if alreadyExpanded {
|
||||
delete(ri.expandedParents, elem)
|
||||
}
|
||||
|
||||
// If this is a leaf, return it
|
||||
if elem.leaf != nil {
|
||||
return elem.leaf.key, elem.leaf.val, true
|
||||
}
|
||||
|
||||
// it's not a leaf so keep walking the stack to find the previous leaf
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
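SeekReverseLowerBound and Previous together walk the tree backwards from the largest key that is less than or equal to a given bound. An editorial sketch (not part of the vendored code, assuming iradix.New() as in the earlier sketches):

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"foo", "foobar", "zip"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// Walk backwards from the largest key <= "fop": "foobar", then "foo".
	ri := r.Root().ReverseIterator()
	ri.SeekReverseLowerBound([]byte("fop"))
	for k, v, ok := ri.Previous(); ok; k, v, ok = ri.Previous() {
		fmt.Printf("%s => %v\n", k, v)
	}
}
```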
19
vendor/github.com/hashicorp/go-retryablehttp/README.md
generated
vendored
@ -45,6 +45,25 @@ The returned response object is an `*http.Response`, the same thing you would
|
||||
usually get from `net/http`. Had the request failed one or more times, the above
|
||||
call would block and retry with exponential backoff.
|
||||
|
||||
## Retrying cases that fail after a seeming success
|
||||
|
||||
It's possible for a request to succeed in the sense that the expected response headers are received, but then to encounter network-level errors while reading the response body. In go-retryablehttp's most basic usage, this error would not be retryable, due to the out-of-band handling of the response body. In some cases it may be desirable to handle the response body as part of the retryable operation.
|
||||
|
||||
A toy example (which will retry the full request and succeed on the second attempt) is shown below:
|
||||
|
||||
```go
|
||||
c := retryablehttp.NewClient()
|
||||
r, _ := retryablehttp.NewRequest("GET", "://foo", nil)
|
||||
handlerShouldRetry := true
|
||||
r.SetResponseHandler(func(*http.Response) error {
|
||||
if !handlerShouldRetry {
|
||||
return nil
|
||||
}
|
||||
handlerShouldRetry = false
|
||||
return errors.New("retryable error")
|
||||
})
|
||||
```
|
||||
|
||||
## Getting a stdlib `*http.Client` with retries
|
||||
|
||||
It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
|
||||
|
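The diff hunk ends mid-section here. For context only (this is not part of the diff), the conversion the README refers to is typically done with the package's StandardClient helper, roughly as in this sketch:

```go
retryClient := retryablehttp.NewClient()
retryClient.RetryMax = 10

// StandardClient wraps the retrying client behind a plain *http.Client,
// so it can be handed to code that only knows about net/http.
standardClient := retryClient.StandardClient()

resp, err := standardClient.Get("http://example.com")
if err == nil {
	defer resp.Body.Close()
	// ... read the response as usual ...
}
```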
85
vendor/github.com/hashicorp/go-retryablehttp/client.go
generated
vendored
@ -69,11 +69,21 @@ var (
|
||||
// scheme specified in the URL is invalid. This error isn't typed
|
||||
// specifically so we resort to matching on the error string.
|
||||
schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
|
||||
|
||||
// A regular expression to match the error returned by net/http when the
|
||||
// TLS certificate is not trusted. This error isn't typed
|
||||
// specifically so we resort to matching on the error string.
|
||||
notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
|
||||
)
|
||||
|
||||
// ReaderFunc is the type of function that can be given natively to NewRequest
|
||||
type ReaderFunc func() (io.Reader, error)
|
||||
|
||||
// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it.
|
||||
// It only runs if the initial part of the request was successful.
|
||||
// If an error is returned, the client's retry policy will be used to determine whether to retry the whole request.
|
||||
type ResponseHandlerFunc func(*http.Response) error
|
||||
|
||||
// LenReader is an interface implemented by many in-memory io.Reader's. Used
|
||||
// for automatically sending the right Content-Length header when possible.
|
||||
type LenReader interface {
|
||||
@ -86,6 +96,8 @@ type Request struct {
|
||||
// used to rewind the request data in between retries.
|
||||
body ReaderFunc
|
||||
|
||||
responseHandler ResponseHandlerFunc
|
||||
|
||||
// Embed an HTTP request directly. This makes a *Request act exactly
|
||||
// like an *http.Request so that all meta methods are supported.
|
||||
*http.Request
|
||||
@ -94,8 +106,16 @@ type Request struct {
|
||||
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
|
||||
// with its context changed to ctx. The provided ctx must be non-nil.
|
||||
func (r *Request) WithContext(ctx context.Context) *Request {
|
||||
r.Request = r.Request.WithContext(ctx)
|
||||
return r
|
||||
return &Request{
|
||||
body: r.body,
|
||||
responseHandler: r.responseHandler,
|
||||
Request: r.Request.WithContext(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
// SetResponseHandler allows setting the response handler.
|
||||
func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
|
||||
r.responseHandler = fn
|
||||
}
|
||||
|
||||
// BodyBytes allows accessing the request body. It is an analogue to
|
||||
@ -252,23 +272,31 @@ func FromRequest(r *http.Request) (*Request, error) {
|
||||
return nil, err
|
||||
}
|
||||
// Could assert contentLength == r.ContentLength
|
||||
return &Request{bodyReader, r}, nil
|
||||
return &Request{body: bodyReader, Request: r}, nil
|
||||
}
|
||||
|
||||
// NewRequest creates a new wrapped request.
|
||||
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
|
||||
return NewRequestWithContext(context.Background(), method, url, rawBody)
|
||||
}
|
||||
|
||||
// NewRequestWithContext creates a new wrapped request with the provided context.
|
||||
//
|
||||
// The context controls the entire lifetime of a request and its response:
|
||||
// obtaining a connection, sending the request, and reading the response headers and body.
|
||||
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
|
||||
bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
httpReq, err := http.NewRequest(method, url, nil)
|
||||
httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
httpReq.ContentLength = contentLength
|
||||
|
||||
return &Request{bodyReader, httpReq}, nil
|
||||
return &Request{body: bodyReader, Request: httpReq}, nil
|
||||
}
|
||||
|
||||
// Logger interface allows to use other loggers than
|
||||
@ -435,6 +463,9 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
}
|
||||
|
||||
// Don't retry if the error was due to TLS cert verification failure.
|
||||
if notTrustedErrorRe.MatchString(v.Error()) {
|
||||
return false, v
|
||||
}
|
||||
if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
|
||||
return false, v
|
||||
}
|
||||
@ -455,7 +486,7 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
// the server time to recover, as 500's are typically not permanent
|
||||
// errors and may relate to outages on the server side. This will catch
|
||||
// invalid response codes as well, like 0 and 999.
|
||||
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
|
||||
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
|
||||
return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
|
||||
}
|
||||
|
||||
@ -555,13 +586,12 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
var resp *http.Response
|
||||
var attempt int
|
||||
var shouldRetry bool
|
||||
var doErr, checkErr error
|
||||
var doErr, respErr, checkErr error
|
||||
|
||||
for i := 0; ; i++ {
|
||||
doErr, respErr = nil, nil
|
||||
attempt++
|
||||
|
||||
var code int // HTTP response code
|
||||
|
||||
// Always rewind the request body when non-nil.
|
||||
if req.body != nil {
|
||||
body, err := req.body()
|
||||
@ -589,19 +619,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
|
||||
// Attempt the request
|
||||
resp, doErr = c.HTTPClient.Do(req.Request)
|
||||
if resp != nil {
|
||||
code = resp.StatusCode
|
||||
}
|
||||
|
||||
// Check if we should continue with retries.
|
||||
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
|
||||
if !shouldRetry && doErr == nil && req.responseHandler != nil {
|
||||
respErr = req.responseHandler(resp)
|
||||
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr)
|
||||
}
|
||||
|
||||
if doErr != nil {
|
||||
err := doErr
|
||||
if respErr != nil {
|
||||
err = respErr
|
||||
}
|
||||
if err != nil {
|
||||
switch v := logger.(type) {
|
||||
case LeveledLogger:
|
||||
v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL)
|
||||
v.Error("request failed", "error", err, "method", req.Method, "url", req.URL)
|
||||
case Logger:
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr)
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
|
||||
}
|
||||
} else {
|
||||
// Call this here to maintain the behavior of logging all requests,
|
||||
@ -636,11 +671,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
|
||||
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
|
||||
if code > 0 {
|
||||
desc = fmt.Sprintf("%s (status: %d)", desc, code)
|
||||
}
|
||||
if logger != nil {
|
||||
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
|
||||
if resp != nil {
|
||||
desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode)
|
||||
}
|
||||
switch v := logger.(type) {
|
||||
case LeveledLogger:
|
||||
v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
|
||||
@ -648,11 +683,13 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||
}
|
||||
}
|
||||
timer := time.NewTimer(wait)
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
timer.Stop()
|
||||
c.HTTPClient.CloseIdleConnections()
|
||||
return nil, req.Context().Err()
|
||||
case <-time.After(wait):
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
// Make shallow copy of http Request so that we can modify its body
|
||||
@ -662,15 +699,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
// this is the closest we have to success criteria
|
||||
if doErr == nil && checkErr == nil && !shouldRetry {
|
||||
if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
defer c.HTTPClient.CloseIdleConnections()
|
||||
|
||||
err := doErr
|
||||
var err error
|
||||
if checkErr != nil {
|
||||
err = checkErr
|
||||
} else if respErr != nil {
|
||||
err = respErr
|
||||
} else {
|
||||
err = doErr
|
||||
}
|
||||
|
||||
if c.ErrorHandler != nil {
|
||||
|
27
vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go
generated
vendored
@ -9,7 +9,10 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
var ErrNotAUrl = errors.New("not a url")
|
||||
var (
|
||||
ErrNotAUrl = errors.New("not a url")
|
||||
ErrNotParsed = errors.New("not a parsed value")
|
||||
)
|
||||
|
||||
// ParsePath parses a URL with schemes file://, env://, or any other. Depending
|
||||
// on the scheme it will return specific types of data:
|
||||
@ -18,7 +21,10 @@ var ErrNotAUrl = errors.New("not a url")
|
||||
//
|
||||
// * env:// will return a string with the env var's contents
|
||||
//
|
||||
// * Anything else will return the string as it was
|
||||
// * Anything else will return the string as it was. Functionally this means
|
||||
// anything for which Go's `url.Parse` function does not throw an error. If you
|
||||
// want to ensure that this function errors if a known scheme is not found, use
|
||||
// MustParsePath.
|
||||
//
|
||||
// On error, we return the original string along with the error. The caller can
|
||||
// switch on errors.Is(err, ErrNotAUrl) to understand whether it was the parsing
|
||||
@ -26,6 +32,16 @@ var ErrNotAUrl = errors.New("not a url")
|
||||
// useful to attempt to read a non-URL string from some resource, but where the
|
||||
// original input may simply be a valid string of that type.
|
||||
func ParsePath(path string) (string, error) {
|
||||
return parsePath(path, false)
|
||||
}
|
||||
|
||||
// MustParsePath behaves like ParsePath but will return ErrNotAUrl if the value
|
||||
// is not a URL with a scheme that can be parsed by this function.
|
||||
func MustParsePath(path string) (string, error) {
|
||||
return parsePath(path, true)
|
||||
}
|
||||
|
||||
func parsePath(path string, mustParse bool) (string, error) {
|
||||
path = strings.TrimSpace(path)
|
||||
parsed, err := url.Parse(path)
|
||||
if err != nil {
|
||||
@ -40,7 +56,10 @@ func ParsePath(path string) (string, error) {
|
||||
return strings.TrimSpace(string(contents)), nil
|
||||
case "env":
|
||||
return strings.TrimSpace(os.Getenv(strings.TrimPrefix(path, "env://"))), nil
|
||||
default:
|
||||
if mustParse {
|
||||
return "", ErrNotParsed
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
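For reference, a small editorial sketch (not part of the diff) of how the existing ParsePath and the new MustParsePath differ for a value that is neither file:// nor env://, assuming the usual github.com/hashicorp/go-secure-stdlib/parseutil import and a purely illustrative environment variable:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// Hypothetical env var used only for illustration.
	os.Setenv("EXAMPLE_TOKEN", "s.123456")

	// env:// values resolve to the variable's (trimmed) contents.
	tok, _ := parseutil.ParsePath("env://EXAMPLE_TOKEN")
	fmt.Println(tok) // "s.123456"

	// A plain string passes through ParsePath unchanged...
	plain, _ := parseutil.ParsePath("just-a-token")
	fmt.Println(plain) // "just-a-token"

	// ...but MustParsePath rejects it with the new ErrNotParsed sentinel.
	if _, err := parseutil.MustParsePath("just-a-token"); errors.Is(err, parseutil.ErrNotParsed) {
		fmt.Println("not a file:// or env:// value")
	}
}
```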
362
vendor/github.com/hashicorp/golang-lru/LICENSE
generated
vendored
Normal file
@ -0,0 +1,362 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
177 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go generated vendored Normal file
@ -0,0 +1,177 @@
package simplelru

import (
    "container/list"
    "errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
    size      int
    evictList *list.List
    items     map[interface{}]*list.Element
    onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
    key   interface{}
    value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
    if size <= 0 {
        return nil, errors.New("Must provide a positive size")
    }
    c := &LRU{
        size:      size,
        evictList: list.New(),
        items:     make(map[interface{}]*list.Element),
        onEvict:   onEvict,
    }
    return c, nil
}

// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
    for k, v := range c.items {
        if c.onEvict != nil {
            c.onEvict(k, v.Value.(*entry).value)
        }
        delete(c.items, k)
    }
    c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
    // Check for existing item
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        ent.Value.(*entry).value = value
        return false
    }

    // Add new item
    ent := &entry{key, value}
    entry := c.evictList.PushFront(ent)
    c.items[key] = entry

    evict := c.evictList.Len() > c.size
    // Verify size not exceeded
    if evict {
        c.removeOldest()
    }
    return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        if ent.Value.(*entry) == nil {
            return nil, false
        }
        return ent.Value.(*entry).value, true
    }
    return
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
    _, ok = c.items[key]
    return ok
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
    var ent *list.Element
    if ent, ok = c.items[key]; ok {
        return ent.Value.(*entry).value, true
    }
    return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
    if ent, ok := c.items[key]; ok {
        c.removeElement(ent)
        return true
    }
    return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
    ent := c.evictList.Back()
    if ent != nil {
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
    keys := make([]interface{}, len(c.items))
    i := 0
    for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
        keys[i] = ent.Value.(*entry).key
        i++
    }
    return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
    return c.evictList.Len()
}

// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
    diff := c.Len() - size
    if diff < 0 {
        diff = 0
    }
    for i := 0; i < diff; i++ {
        c.removeOldest()
    }
    c.size = size
    return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
    }
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
    c.evictList.Remove(e)
    kv := e.Value.(*entry)
    delete(c.items, kv.key)
    if c.onEvict != nil {
        c.onEvict(kv.key, kv.value)
    }
}
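For orientation, a minimal usage sketch of the cache added above; the key names and size are illustrative only, and (as the package itself notes) this LRU is not thread safe, so real callers add their own locking:

package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    // Evict callback fires whenever an entry falls off the back of the list.
    onEvict := func(key interface{}, value interface{}) {
        fmt.Printf("evicted %v=%v\n", key, value)
    }

    cache, err := simplelru.NewLRU(2, onEvict)
    if err != nil {
        panic(err)
    }

    cache.Add("a", 1)
    cache.Add("b", 2)
    cache.Add("c", 3) // evicts "a", the least recently used entry

    if _, ok := cache.Get("a"); !ok {
        fmt.Println("a was evicted, cache holds", cache.Len(), "entries")
    }
}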
39 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go generated vendored Normal file
@ -0,0 +1,39 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
    // Adds a value to the cache, returns true if an eviction occurred and
    // updates the "recently used"-ness of the key.
    Add(key, value interface{}) bool

    // Returns key's value from the cache and
    // updates the "recently used"-ness of the key. #value, isFound
    Get(key interface{}) (value interface{}, ok bool)

    // Checks if a key exists in cache without updating the recent-ness.
    Contains(key interface{}) (ok bool)

    // Returns key's value without updating the "recently used"-ness of the key.
    Peek(key interface{}) (value interface{}, ok bool)

    // Removes a key from the cache.
    Remove(key interface{}) bool

    // Removes the oldest entry from cache.
    RemoveOldest() (interface{}, interface{}, bool)

    // Returns the oldest entry from the cache. #key, value, isFound
    GetOldest() (interface{}, interface{}, bool)

    // Returns a slice of the keys in the cache, from oldest to newest.
    Keys() []interface{}

    // Returns the number of items in the cache.
    Len() int

    // Clears all cache entries.
    Purge()

    // Resizes cache, returning number evicted
    Resize(int) int
}
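Since the LRU type in the previous file implements every method of this interface, a compile-time assertion is the usual way to keep the two in sync. This is an illustrative check, not part of the vendored code:

package lrucheck

import "github.com/hashicorp/golang-lru/simplelru"

// Fails to compile if *simplelru.LRU ever stops satisfying simplelru.LRUCache.
var _ simplelru.LRUCache = (*simplelru.LRU)(nil)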
12 vendor/github.com/hashicorp/hcl/decoder.go generated vendored
@ -632,12 +632,16 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// fill unusedNodeKeys with keys from the AST
// a slice because we have to do equals case fold to match Filter
unusedNodeKeys := make(map[string][]token.Pos, 0)
for _, item := range list.Items {
for _, k := range item.Keys{
if k.Token.JSON || k.Token.Type == token.IDENT {
for i, item := range list.Items {
for _, k := range item.Keys {
// isNestedJSON returns true for e.g. bar in
// { "foo": { "bar": {...} } }
// This isn't an unused node key, so we want to skip it
isNestedJSON := i > 0 && len(item.Keys) > 1
if !isNestedJSON && (k.Token.JSON || k.Token.Type == token.IDENT) {
fn := k.Token.Value().(string)
sl := unusedNodeKeys[fn]
unusedNodeKeys[fn] = append(sl, k.Token.Pos)
unusedNodeKeys[fn] = append(sl, k.Token.Pos)
}
}
}
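The hunk above teaches the decoder to skip nested JSON keys (the "bar" in { "foo": { "bar": {...} } }) when it records unused node keys, so they are no longer flagged as undecoded. A rough sketch of the input shape involved, using the package's public hcl.Decode entry point; the struct and field names here are made up for the example and not taken from this repository:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl"
)

type config struct {
    // JSON input is valid HCL; "foo" is a top-level key, while "bar" is a
    // nested key, which is what the decoder fix now distinguishes.
    Foo map[string]interface{} `hcl:"foo"`
}

func main() {
    input := `{ "foo": { "bar": { "baz": 1 } } }`

    var out config
    if err := hcl.Decode(&out, input); err != nil {
        panic(err)
    }
    fmt.Println(out.Foo)
}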
2 vendor/github.com/hashicorp/vault/LICENSE generated vendored
@ -1,3 +1,5 @@
Copyright (c) 2015 HashiCorp, Inc.

Mozilla Public License, version 2.0

1. Definitions
3 vendor/github.com/hashicorp/vault/api/auth.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (

3 vendor/github.com/hashicorp/vault/api/auth_token.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
13 vendor/github.com/hashicorp/vault/api/client.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
@ -200,6 +203,7 @@ type Config struct {
// commands such as 'vault operator raft snapshot' as this redirects to the
// primary node.
DisableRedirects bool
clientTLSConfig *tls.Config
}

// TLSConfig contains the parameters needed to configure TLS on the HTTP client
@ -334,10 +338,17 @@ func (c *Config) configureTLS(t *TLSConfig) error {
if t.TLSServerName != "" {
clientTLSConfig.ServerName = t.TLSServerName
}
c.clientTLSConfig = clientTLSConfig

return nil
}

func (c *Config) TLSConfig() *tls.Config {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
return c.clientTLSConfig.Clone()
}

// ConfigureTLS takes a set of TLS configurations and applies those to the
// HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
@ -662,6 +673,7 @@ func (c *Client) CloneConfig() *Config {
newConfig.CloneHeaders = c.config.CloneHeaders
newConfig.CloneToken = c.config.CloneToken
newConfig.ReadYourWrites = c.config.ReadYourWrites
newConfig.clientTLSConfig = c.config.clientTLSConfig

// we specifically want a _copy_ of the client here, not a pointer to the original one
newClient := *c.config.HttpClient
@ -1362,6 +1374,7 @@ START:
LastOutputPolicyError = &OutputPolicyError{
method: req.Method,
path: strings.TrimPrefix(req.URL.Path, "/v1"),
params: req.URL.Query(),
}
return nil, LastOutputPolicyError
}
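The client.go hunks add a clientTLSConfig field and a TLSConfig() accessor that hands back a clone of the configured *tls.Config. A hedged sketch of how a caller might exercise it; the certificate path and server name below are placeholders:

package main

import (
    "log"

    vault "github.com/hashicorp/vault/api"
)

func main() {
    cfg := vault.DefaultConfig()

    // ConfigureTLS stores the resulting *tls.Config on the Config (the new
    // clientTLSConfig field); the file path here is a placeholder.
    err := cfg.ConfigureTLS(&vault.TLSConfig{
        CACert:        "/etc/ssl/vault-ca.pem",
        TLSServerName: "vault.example.com",
    })
    if err != nil {
        log.Fatal(err)
    }

    // The new accessor returns a clone, so callers can inspect or reuse the
    // TLS settings without mutating the client's own copy.
    tlsCfg := cfg.TLSConfig()
    log.Println("server name:", tlsCfg.ServerName)

    client, err := vault.NewClient(cfg)
    if err != nil {
        log.Fatal(err)
    }
    _ = client
}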
3 vendor/github.com/hashicorp/vault/api/help.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (

3 vendor/github.com/hashicorp/vault/api/kv.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import "errors"

3 vendor/github.com/hashicorp/vault/api/kv_v1.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (

3 vendor/github.com/hashicorp/vault/api/kv_v2.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
49 vendor/github.com/hashicorp/vault/api/lifetime_watcher.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
@ -147,6 +150,13 @@ func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher,

random := i.Rand
if random == nil {
// NOTE:
// Rather than a cryptographically secure random number generator (RNG),
// the default behavior uses the math/rand package. The random number is
// used to introduce a slight jitter when calculating the grace period
// for a monitored secret monitoring. This is intended to stagger renewal
// requests to the Vault server, but in a semi-predictable way, so there
// is no need to use a cryptographically secure RNG.
random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
}

@ -337,25 +347,15 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool,

var sleepDuration time.Duration

if errorBackoff != nil {
sleepDuration = errorBackoff.NextBackOff()
if sleepDuration == backoff.Stop {
return err
}
} else {
// We keep evaluating a new grace period so long as the lease is
// extending. Once it stops extending, we've hit the max and need to
// rely on the grace duration.
if remainingLeaseDuration > priorDuration {
r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second)
}
priorDuration = remainingLeaseDuration

// The sleep duration is set to 2/3 of the current lease duration plus
// 1/3 of the current grace period, which adds jitter.
sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
if errorBackoff == nil {
sleepDuration = r.calculateSleepDuration(remainingLeaseDuration, priorDuration)
} else if errorBackoff.NextBackOff() == backoff.Stop {
return err
}

// remainingLeaseDuration becomes the priorDuration for the next loop
priorDuration = remainingLeaseDuration

// If we are within grace, return now; or, if the amount of time we
// would sleep would land us in the grace period. This helps with short
// tokens; for example, you don't want a current lease duration of 4
@ -377,6 +377,21 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool,
}
}

// calculateSleepDuration calculates the amount of time the LifeTimeWatcher should sleep
// before re-entering its loop.
func (r *LifetimeWatcher) calculateSleepDuration(remainingLeaseDuration, priorDuration time.Duration) time.Duration {
// We keep evaluating a new grace period so long as the lease is
// extending. Once it stops extending, we've hit the max and need to
// rely on the grace duration.
if remainingLeaseDuration > priorDuration {
r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second)
}

// The sleep duration is set to 2/3 of the current lease duration plus
// 1/3 of the current grace period, which adds jitter.
return time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
}

// calculateGrace calculates the grace period based on the minimum of the
// remaining lease duration and the token increment value; it also adds some
// jitter to not have clients be in sync.
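The refactor above only moves the sleep calculation into calculateSleepDuration; the formula itself is unchanged: sleep for 2/3 of the remaining lease plus 1/3 of the (jittered) grace period. A standalone sketch of that arithmetic, outside the watcher type, with illustrative durations:

package main

import (
    "fmt"
    "time"
)

// sleepFor mirrors the 2/3 lease + 1/3 grace rule used by the lifetime
// watcher; the lease and grace values below are illustrative.
func sleepFor(remainingLease, grace time.Duration) time.Duration {
    return time.Duration(float64(remainingLease.Nanoseconds())*2/3 +
        float64(grace.Nanoseconds())/3)
}

func main() {
    lease := 60 * time.Second
    grace := 12 * time.Second
    fmt.Println(sleepFor(lease, grace)) // 44s: 40s of lease + 4s of grace
}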
3 vendor/github.com/hashicorp/vault/api/logical.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
35 vendor/github.com/hashicorp/vault/api/output_policy.go generated vendored
@ -1,9 +1,13 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)

@ -16,6 +20,7 @@ var LastOutputPolicyError *OutputPolicyError

type OutputPolicyError struct {
method string
path string
params url.Values
finalHCLString string
}

@ -44,8 +49,22 @@ func (d *OutputPolicyError) HCLString() (string, error) {

// Builds a sample policy document from the request
func (d *OutputPolicyError) buildSamplePolicy() (string, error) {
operation := d.method
// List is often defined as a URL param instead of as an http.Method
// this will check for the header and properly switch off of the intended functionality
if d.params.Has("list") {
isList, err := strconv.ParseBool(d.params.Get("list"))
if err != nil {
return "", fmt.Errorf("the value of the list url param is not a bool: %v", err)
}

if isList {
operation = "LIST"
}
}

var capabilities []string
switch d.method {
switch operation {
case http.MethodGet, "":
capabilities = append(capabilities, "read")
case http.MethodPost, http.MethodPut:
@ -59,17 +78,15 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) {
capabilities = append(capabilities, "list")
}

// sanitize, then trim the Vault address and v1 from the front of the path
path, err := url.PathUnescape(d.path)
if err != nil {
return "", fmt.Errorf("failed to unescape request URL characters: %v", err)
}

// determine whether to add sudo capability
if IsSudoPath(path) {
if IsSudoPath(d.path) {
capabilities = append(capabilities, "sudo")
}

return formatOutputPolicy(d.path, capabilities), nil
}

func formatOutputPolicy(path string, capabilities []string) string {
// the OpenAPI response has a / in front of each path,
// but policies need the path without that leading slash
path = strings.TrimLeft(path, "/")
@ -78,5 +95,5 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) {
return fmt.Sprintf(
`path "%s" {
capabilities = ["%s"]
}`, path, capStr), nil
}`, path, capStr)
}
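For reference, buildSamplePolicy above maps the HTTP verb (or the list query parameter) to Vault capabilities and then renders a one-path policy. A toy reimplementation of just the formatting step, to show the shape of the output; it is not the vendored function itself:

package main

import (
    "fmt"
    "strings"
)

// formatPolicy mimics formatOutputPolicy: strip the leading slash that the
// OpenAPI path carries and render an HCL policy stanza.
func formatPolicy(path string, capabilities []string) string {
    path = strings.TrimLeft(path, "/")
    capStr := strings.Join(capabilities, `", "`)
    return fmt.Sprintf(`path "%s" {
  capabilities = ["%s"]
}`, path, capStr)
}

func main() {
    // A GET against /v1/secret/data/app would yield the "read" capability.
    fmt.Println(formatPolicy("/secret/data/app", []string{"read"}))
}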
3 vendor/github.com/hashicorp/vault/api/output_string.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
11 vendor/github.com/hashicorp/vault/api/plugin_helpers.go generated vendored
@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
@ -37,7 +40,7 @@ const (
// path matches that path or not (useful specifically for the paths that
// contain templated fields.)
var sudoPaths = map[string]*regexp.Regexp{
"/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`),
"/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/?$`),
"/pki/root": regexp.MustCompile(`^/pki/root$`),
"/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`),
"/sys/audit": regexp.MustCompile(`^/sys/audit$`),
@ -47,10 +50,10 @@ var sudoPaths = map[string]*regexp.Regexp{
"/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`),
"/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`),
"/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`),
"/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`),
"/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/?$`),
"/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`),
"/sys/leases": regexp.MustCompile(`^/sys/leases$`),
"/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`),
"/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/?$`),
"/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`),
"/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`),
"/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`),
@ -70,7 +73,7 @@ var sudoPaths = map[string]*regexp.Regexp{
"/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`),
"/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`),
"/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`),
"/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`),
"/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`),
"/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`),
}

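The regex changes above make the trailing slash optional for the list-style sudo paths, so both /sys/leases/lookup and /sys/leases/lookup/ are recognized. A small sketch using the package's exported IsSudoPath helper (assuming it stays exported, as the output_policy.go hunk suggests):

package main

import (
    "fmt"

    vault "github.com/hashicorp/vault/api"
)

func main() {
    // With the relaxed regexes, both spellings should be treated as
    // sudo-protected paths.
    for _, p := range []string{"/sys/leases/lookup", "/sys/leases/lookup/"} {
        fmt.Printf("%-25s sudo=%v\n", p, vault.IsSudoPath(p))
    }
}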
3
vendor/github.com/hashicorp/vault/api/plugin_types.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/plugin_types.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
// NOTE: this file was copied from
|
||||
|
3
vendor/github.com/hashicorp/vault/api/request.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/request.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/response.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/response.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/secret.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/secret.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/ssh.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/ssh.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/ssh_agent.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/ssh_agent.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
// Sys is used to perform system-related operations on Vault.
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_audit.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_audit.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_auth.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_auth.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_capabilities.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_capabilities.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_config_cors.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_config_cors.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_generate_root.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_generate_root.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_hastatus.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_hastatus.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_health.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_health.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_init.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_init.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_leader.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_leader.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_leases.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_leases.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_mfa.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_mfa.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_monitor.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_monitor.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_mounts.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_mounts.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_plugins.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_plugins.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_policy.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_policy.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_raft.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_raft.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_rekey.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_rekey.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_rotate.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_rotate.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_seal.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_seal.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
3
vendor/github.com/hashicorp/vault/api/sys_stepdown.go
generated
vendored
3
vendor/github.com/hashicorp/vault/api/sys_stepdown.go
generated
vendored
@ -1,3 +1,6 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
|
264 vendor/github.com/hashicorp/vault/command/agent/auth/auth.go generated vendored
@ -8,13 +8,14 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
||||
)
|
||||
|
||||
const (
|
||||
initialBackoff = 1 * time.Second
|
||||
defaultMinBackoff = 1 * time.Second
|
||||
defaultMaxBackoff = 5 * time.Minute
|
||||
)
|
||||
|
||||
@ -54,8 +55,10 @@ type AuthHandler struct {
|
||||
random *rand.Rand
|
||||
wrapTTL time.Duration
|
||||
maxBackoff time.Duration
|
||||
minBackoff time.Duration
|
||||
enableReauthOnNewCredentials bool
|
||||
enableTemplateTokenCh bool
|
||||
exitOnError bool
|
||||
}
|
||||
|
||||
type AuthHandlerConfig struct {
|
||||
@ -63,9 +66,11 @@ type AuthHandlerConfig struct {
|
||||
Client *api.Client
|
||||
WrapTTL time.Duration
|
||||
MaxBackoff time.Duration
|
||||
MinBackoff time.Duration
|
||||
Token string
|
||||
EnableReauthOnNewCredentials bool
|
||||
EnableTemplateTokenCh bool
|
||||
ExitOnError bool
|
||||
}
|
||||
|
||||
func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
|
||||
@ -79,15 +84,21 @@ func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
|
||||
client: conf.Client,
|
||||
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
wrapTTL: conf.WrapTTL,
|
||||
minBackoff: conf.MinBackoff,
|
||||
maxBackoff: conf.MaxBackoff,
|
||||
enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials,
|
||||
enableTemplateTokenCh: conf.EnableTemplateTokenCh,
|
||||
exitOnError: conf.ExitOnError,
|
||||
}
|
||||
|
||||
return ah
|
||||
}
|
||||
|
||||
func backoffOrQuit(ctx context.Context, backoff *agentBackoff) {
|
||||
func backoff(ctx context.Context, backoff *agentBackoff) bool {
|
||||
if backoff.exitOnErr {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(backoff.current):
|
||||
case <-ctx.Done():
|
||||
@ -96,6 +107,7 @@ func backoffOrQuit(ctx context.Context, backoff *agentBackoff) {
|
||||
// Increase exponential backoff for the next time if we don't
|
||||
// successfully auth/renew/etc.
|
||||
backoff.next()
|
||||
return true
|
||||
}
|
||||
|
||||
func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
@ -103,7 +115,15 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
return errors.New("auth handler: nil auth method")
|
||||
}
|
||||
|
||||
backoff := newAgentBackoff(ah.maxBackoff)
|
||||
if ah.minBackoff <= 0 {
|
||||
ah.minBackoff = defaultMinBackoff
|
||||
}
|
||||
|
||||
backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError)
|
||||
|
||||
if backoffCfg.min >= backoffCfg.max {
|
||||
return errors.New("auth handler: min_backoff cannot be greater than max_backoff")
|
||||
}
|
||||
|
||||
ah.logger.Info("starting auth handler")
|
||||
defer func() {
|
||||
@ -149,19 +169,29 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
var path string
|
||||
var data map[string]interface{}
|
||||
var header http.Header
|
||||
var isTokenFileMethod bool
|
||||
|
||||
switch am.(type) {
|
||||
case AuthMethodWithClient:
|
||||
clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client)
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
default:
|
||||
clientToUse = ah.client
|
||||
}
|
||||
|
||||
// Disable retry on the client to ensure our backoffOrQuit function is
|
||||
// the only source of retry/backoff.
|
||||
clientToUse.SetMaxRetries(0)
|
||||
|
||||
var secret *api.Secret = new(api.Secret)
|
||||
if first && ah.token != "" {
|
||||
ah.logger.Debug("using preloaded token")
|
||||
@ -170,11 +200,15 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
ah.logger.Debug("lookup-self with preloaded token")
|
||||
clientToUse.SetToken(ah.token)
|
||||
|
||||
secret, err = clientToUse.Logical().Read("auth/token/lookup-self")
|
||||
secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx)
|
||||
if err != nil {
|
||||
ah.logger.Error("could not look up token", "err", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
duration, _ := secret.Data["ttl"].(json.Number).Int64()
|
||||
@ -188,18 +222,26 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
|
||||
path, header, data, err = am.Authenticate(ctx, ah.client)
|
||||
if err != nil {
|
||||
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ah.wrapTTL > 0 {
|
||||
wrapClient, err := clientToUse.Clone()
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
wrapClient.SetWrappingLookupFunc(func(string, string) string {
|
||||
return ah.wrapTTL.String()
|
||||
@ -213,34 +255,65 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
}
|
||||
|
||||
// This should only happen if there's no preloaded token (regular auto-auth login)
|
||||
// or if a preloaded token has expired and is now switching to auto-auth.
|
||||
// or if a preloaded token has expired and is now switching to auto-auth.
|
||||
if secret.Auth == nil {
|
||||
secret, err = clientToUse.Logical().Write(path, data)
|
||||
isTokenFileMethod = path == "auth/token/lookup-self"
|
||||
if isTokenFileMethod {
|
||||
token, _ := data["token"].(string)
|
||||
lookupSelfClient, err := clientToUse.Clone()
|
||||
if err != nil {
|
||||
ah.logger.Error("failed to clone client to perform token lookup")
|
||||
return err
|
||||
}
|
||||
lookupSelfClient.SetToken(token)
|
||||
secret, err = lookupSelfClient.Auth().Token().LookupSelf()
|
||||
} else {
|
||||
secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data)
|
||||
}
|
||||
|
||||
// Check errors/sanity
|
||||
if err != nil {
|
||||
ah.logger.Error("error authenticating", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var leaseDuration int
|
||||
|
||||
switch {
|
||||
case ah.wrapTTL > 0:
|
||||
if secret.WrapInfo == nil {
|
||||
ah.logger.Error("authentication returned nil wrap info", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if secret.WrapInfo.Token == "" {
|
||||
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo)
|
||||
if err != nil {
|
||||
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing")
|
||||
ah.OutputCh <- string(wrappedResp)
|
||||
@ -249,7 +322,7 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoff.reset()
|
||||
backoffCfg.reset()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@ -262,24 +335,81 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
}
|
||||
|
||||
default:
|
||||
if secret == nil || secret.Auth == nil {
|
||||
ah.logger.Error("authentication returned nil auth info", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
if secret.Auth.ClientToken == "" {
|
||||
ah.logger.Error("authentication returned empty client token", "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
}
|
||||
ah.logger.Info("authentication successful, sending token to sinks")
|
||||
ah.OutputCh <- secret.Auth.ClientToken
|
||||
if ah.enableTemplateTokenCh {
|
||||
ah.TemplateTokenCh <- secret.Auth.ClientToken
|
||||
// We handle the token_file method specially, as it's the only
|
||||
// auth method that isn't actually authenticating, i.e. the secret
|
||||
// returned does not have an Auth struct attached
|
||||
isTokenFileMethod := path == "auth/token/lookup-self"
|
||||
if isTokenFileMethod {
|
||||
// We still check the response of the request to ensure the token is valid
|
||||
// i.e. if the token is invalid, we will fail in the authentication step
|
||||
if secret == nil || secret.Data == nil {
|
||||
ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
token, ok := secret.Data["id"].(string)
|
||||
if !ok || token == "" {
|
||||
ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
duration, _ := secret.Data["ttl"].(json.Number).Int64()
|
||||
leaseDuration = int(duration)
|
||||
renewable, _ := secret.Data["renewable"].(bool)
|
||||
secret.Auth = &api.SecretAuth{
|
||||
ClientToken: token,
|
||||
LeaseDuration: int(duration),
|
||||
Renewable: renewable,
|
||||
}
|
||||
ah.logger.Info("authentication successful, sending token to sinks")
|
||||
ah.OutputCh <- token
|
||||
if ah.enableTemplateTokenCh {
|
||||
ah.TemplateTokenCh <- token
|
||||
}
|
||||
|
||||
tokenType := secret.Data["type"].(string)
|
||||
if tokenType == "batch" {
|
||||
ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration)
|
||||
}
|
||||
} else {
|
||||
if secret == nil || secret.Auth == nil {
|
||||
ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if secret.Auth.ClientToken == "" {
|
||||
ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
leaseDuration = secret.LeaseDuration
|
||||
ah.logger.Info("authentication successful, sending token to sinks")
|
||||
ah.OutputCh <- secret.Auth.ClientToken
|
||||
if ah.enableTemplateTokenCh {
|
||||
ah.TemplateTokenCh <- secret.Auth.ClientToken
|
||||
}
|
||||
}
|
||||
|
||||
am.CredSuccess()
|
||||
backoff.reset()
|
||||
backoffCfg.reset()
|
||||
}
|
||||
|
||||
if watcher != nil {
|
||||
@ -290,14 +420,24 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
Secret: secret,
|
||||
})
|
||||
if err != nil {
|
||||
ah.logger.Error("error creating lifetime watcher, backing off and retrying", "error", err, "backoff", backoff)
|
||||
backoffOrQuit(ctx, backoff)
|
||||
continue
|
||||
ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg)
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
|
||||
if backoff(ctx, backoffCfg) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Start the renewal process
|
||||
ah.logger.Info("starting renewal process")
|
||||
go watcher.Renew()
|
||||
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
|
||||
// We don't want to trigger the renewal process for tokens with
|
||||
// unlimited TTL, such as the root token.
|
||||
if leaseDuration == 0 && isTokenFileMethod {
|
||||
ah.logger.Info("not starting token renewal process, as token has unlimited TTL")
|
||||
} else {
|
||||
ah.logger.Info("starting renewal process")
|
||||
go watcher.Renew()
|
||||
}
|
||||
|
||||
LifetimeWatcherLoop:
|
||||
for {
|
||||
@ -310,11 +450,13 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
case err := <-watcher.DoneCh():
|
||||
ah.logger.Info("lifetime watcher done channel triggered")
|
||||
if err != nil {
|
||||
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
|
||||
ah.logger.Error("error renewing token", "error", err)
|
||||
}
|
||||
break LifetimeWatcherLoop
|
||||
|
||||
case <-watcher.RenewCh():
|
||||
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
|
||||
ah.logger.Info("renewed auth token")
|
||||
|
||||
case <-credCh:
|
||||
@ -327,18 +469,26 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
|
||||
|
||||
// agentBackoff tracks exponential backoff state.
|
||||
type agentBackoff struct {
|
||||
max time.Duration
|
||||
current time.Duration
|
||||
min time.Duration
|
||||
max time.Duration
|
||||
current time.Duration
|
||||
exitOnErr bool
|
||||
}
|
||||
|
||||
func newAgentBackoff(max time.Duration) *agentBackoff {
|
||||
func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff {
|
||||
if max <= 0 {
|
||||
max = defaultMaxBackoff
|
||||
}
|
||||
|
||||
if min <= 0 {
|
||||
min = defaultMinBackoff
|
||||
}
|
||||
|
||||
return &agentBackoff{
|
||||
max: max,
|
||||
current: initialBackoff,
|
||||
current: min,
|
||||
max: max,
|
||||
min: min,
|
||||
exitOnErr: exitErr,
|
||||
}
|
||||
}
|
||||
|
||||
@ -357,7 +507,7 @@ func (b *agentBackoff) next() {
|
||||
}
|
||||
|
||||
func (b *agentBackoff) reset() {
|
||||
b.current = initialBackoff
|
||||
b.current = b.min
|
||||
}
|
||||
|
||||
func (b agentBackoff) String() string {
|
||||
|
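The auth handler rework above threads a configurable min/max backoff (and exit-on-error behavior) through every failure branch and emits agent auth failure/success metrics. A condensed, illustrative sketch of the min/max exponential backoff pattern it relies on; the type and function names are mine, not the vendored ones, and the real agent backoff also applies jitter:

package main

import (
    "fmt"
    "time"
)

// expBackoff doubles the wait after each failure, clamps it to max, and
// resets to min after a success — similar in shape to the agent's backoff.
type expBackoff struct {
    min, max, current time.Duration
}

func newExpBackoff(min, max time.Duration) *expBackoff {
    return &expBackoff{min: min, max: max, current: min}
}

func (b *expBackoff) next() time.Duration {
    d := b.current
    b.current *= 2
    if b.current > b.max {
        b.current = b.max
    }
    return d
}

func (b *expBackoff) reset() { b.current = b.min }

func main() {
    b := newExpBackoff(1*time.Second, 5*time.Minute)
    for i := 0; i < 5; i++ {
        fmt.Println(b.next()) // 1s, 2s, 4s, 8s, 16s
    }
    b.reset()
    fmt.Println(b.next()) // back to 1s after a successful auth
}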
32 vendor/github.com/imdario/mergo/README.md generated vendored
@ -8,8 +8,7 @@
|
||||
[![Coverage Status][9]][10]
|
||||
[![Sourcegraph][11]][12]
|
||||
[![FOSSA Status][13]][14]
|
||||
|
||||
[![GoCenter Kudos][15]][16]
|
||||
[![Become my sponsor][15]][16]
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
@ -25,8 +24,8 @@
|
||||
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
|
||||
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
|
||||
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
|
||||
[16]: https://search.gocenter.io/github.com/imdario/mergo
|
||||
[15]: https://img.shields.io/github/sponsors/imdario
|
||||
[16]: https://github.com/sponsors/imdario
|
||||
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
|
||||
|
||||
## Status
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
|
||||
### Important note
|
||||
|
||||
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
|
||||
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
|
||||
|
||||
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.

@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:

<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>

### Mergo in the wild

- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
- [tjpnz/structbot](https://github.com/tjpnz/structbot)

## Install

@ -168,7 +169,7 @@ func main() {

Note: if test are failing due missing package, please execute:

go get gopkg.in/yaml.v2
go get gopkg.in/yaml.v3

### Transformers

@ -218,7 +219,6 @@ func main() {
}
```

## Contact me

If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don

Written by [Dario Castañé](http://dario.im).

## Top Contributors

[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)

## License

[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).

2 vendor/github.com/imdario/mergo/merge.go generated vendored

@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}

if config.Transformers != nil && !isEmptyValue(dst) {
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return

4 vendor/github.com/imdario/mergo/mergo.go generated vendored

@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs and maps are supported")
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
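The widened kind check in `resolveValues` above is what lets a slice be passed as the merge destination. A rough sketch of what that enables, assuming top-level slices honour the same `WithAppendSlice`/`WithOverride` options as slice fields do (variable values are mine):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	dst := []string{"a", "b"}
	src := []string{"c"}

	// Before this change a slice destination failed with ErrNotSupported
	// ("only structs and maps are supported").
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println(dst) // assumed result with WithAppendSlice: [a b c]
}
```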

4 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored

@ -1,5 +1,5 @@
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine
// +build darwin freebsd openbsd netbsd dragonfly
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine

package isatty

78 vendor/github.com/stretchr/testify/assert/assertions.go generated vendored

@ -8,7 +8,6 @@ import (
"fmt"
"math"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
@ -141,12 +140,11 @@ func CallerInfo() []string {
}

parts := strings.Split(file, "/")
file = parts[len(parts)-1]
if len(parts) > 1 {
filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
path, _ := filepath.Abs(file)
callers = append(callers, fmt.Sprintf("%s:%d", path, line))
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}

@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice},
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)

if isNilableKind && value.IsNil() {
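With `reflect.UnsafePointer` added to the nilable kinds above, `Nil`/`NotNil` now treat unsafe pointers like other pointer kinds. A small illustrative test (the test name and values are mine):

```go
package example_test

import (
	"testing"
	"unsafe"

	"github.com/stretchr/testify/assert"
)

func TestNilUnsafePointer(t *testing.T) {
	var p unsafe.Pointer

	// Passes with this version; before the change unsafe.Pointer was not in
	// the nilable-kind list, so a nil unsafe.Pointer failed assert.Nil.
	assert.Nil(t, p)

	x := 42
	assert.NotNil(t, unsafe.Pointer(&x))
}
```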
@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}

defer func() {
if e := recover(); e != nil {
ok = false
}
}()

listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()

if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}

subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}

subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)

for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)

if !ObjectsAreEqual(subsetElement, listElement) {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
if !av.IsValid() {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}

return true
}

for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}

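The rewritten map branch above checks every key/value pair of the subset against the actual map and reports failures with `%#v`. A small illustrative test of both forms (the test name and values are mine):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubset(t *testing.T) {
	// Slice form: every element of the subset must appear in the list.
	assert.Subset(t, []int{1, 2, 3}, []int{1, 3})

	// Map form (the branch shown above): every key of the subset must exist
	// in the actual map and hold an equal value.
	assert.Subset(t, map[string]int{"a": 1, "b": 2}, map[string]int{"a": 1})
	assert.NotSubset(t, map[string]int{"a": 1, "b": 2}, map[string]int{"a": 99})
}
```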
@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}

defer func() {
if e := recover(); e != nil {
ok = false
}
}()

listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()

if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}

subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}

subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)

for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)

if !ObjectsAreEqual(subsetElement, listElement) {
if !av.IsValid() {
return true
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}

for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)

129 vendor/go.opentelemetry.io/otel/CHANGELOG.md generated vendored

@ -8,6 +8,126 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm

## [Unreleased]

## [1.11.1/0.33.0] 2022-10-19

### Added

- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
  By default, it will register with the default Prometheus registerer.
  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)

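A minimal sketch of wiring the #3239 options above into a meter provider (the registry, meter name, and port are mine; this assumes the v0.33.0 `New`/`WithRegisterer` signatures):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Use a dedicated registry instead of the default Prometheus registerer.
	registry := prometheus.NewRegistry()

	// New returns an error because registration with Prometheus can fail (#3239).
	exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
	if err != nil {
		log.Fatal(err)
	}

	// The exporter acts as a metric.Reader for the SDK meter provider.
	provider := metric.NewMeterProvider(metric.WithReader(exporter))
	_ = provider.Meter("example")

	// Expose only the dedicated registry.
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2223", nil))
}
```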
### Changed

- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
  It will return an error if the exporter fails to register with Prometheus. (#3239)

### Fixed

- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
  This fixes the implementation to be compliant with the W3C specification. (#3226)
- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252)
- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
  Instead the exporter is defined as an "unchecked" collector for Prometheus.
  This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)

## [1.11.0/0.32.3] 2022-10-12

### Added

- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)

### Changed

- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)

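A short sketch of the `span.SetStatus` behaviour described in #3214 above (tracer and span names are mine; the global tracer is a no-op unless an SDK provider is installed):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "operation")
	defer span.End()

	span.SetStatus(codes.Error, "transient failure")
	span.SetStatus(codes.Ok, "") // raises the status; Ok outranks Error

	// With #3214 this call is a no-op, because it would lower the status
	// from Ok back to Error.
	span.SetStatus(codes.Error, "ignored")
}
```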
## [0.32.2] Metric SDK (Alpha) - 2022-10-11

### Added

- Added an example of using metric views to customize instruments. (#3177)
- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)

### Changed

- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
- Update histogram default bounds to match the requirements of the latest specification. (#3222)
- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)

### Fixed

- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)

## [0.32.1] Metric SDK (Alpha) - 2022-09-22

### Changed

- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
  Invalid characters are replaced with `_`. (#3212)

### Added

- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)

### Fixed

- Updated go.mods to point to valid versions of the sdk. (#3216)
- Set the `MeterProvider` resource on all exported metric data. (#3218)

## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18

### Changed

- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179)

### Removed

- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)

## [1.10.0] - 2022-09-09

### Added
@ -191,7 +311,7 @@ Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be mod
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_LINK_COUNT_LIMIT`
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`

If the provided environment variables are invalid (negative), the default values would be used.
- Rename the `gc` runtime name to `go` (#2560)
- Add resource container ID detection. (#2418)
@ -1907,7 +2027,12 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.

[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.1...HEAD
[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0

2 vendor/go.opentelemetry.io/otel/Makefile generated vendored

@ -156,7 +156,7 @@ go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy -compat=1.17
&& $(GO) mod tidy -compat=1.18

.PHONY: lint-modules
lint-modules: go-mod-tidy

5 vendor/go.opentelemetry.io/otel/README.md generated vendored

@ -52,19 +52,14 @@ Currently, this project supports the following environments.
| ------- | ---------- | ------------ |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.18 | amd64 |
| Ubuntu | 1.17 | amd64 |
| Ubuntu | 1.19 | 386 |
| Ubuntu | 1.18 | 386 |
| Ubuntu | 1.17 | 386 |
| MacOS | 1.19 | amd64 |
| MacOS | 1.18 | amd64 |
| MacOS | 1.17 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.18 | amd64 |
| Windows | 1.17 | amd64 |
| Windows | 1.19 | 386 |
| Windows | 1.18 | 386 |
| Windows | 1.17 | 386 |

While this project should work for other systems, no compatibility guarantees
are made for those systems currently.

67 vendor/go.opentelemetry.io/otel/attribute/value.go generated vendored

@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"reflect"
"strconv"

"go.opentelemetry.io/otel/internal"
"go.opentelemetry.io/otel/internal/attribute"
)

//go:generate stringer -type=Type
@ -66,12 +68,7 @@ func BoolValue(v bool) Value {

// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
cp := make([]bool, len(v))
copy(cp, v)
return Value{
vtype: BOOLSLICE,
slice: &cp,
}
return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)}
}

// IntValue creates an INT64 Value.
@ -81,13 +78,14 @@ func IntValue(v int) Value {

// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
cp := make([]int64, 0, len(v))
for _, i := range v {
cp = append(cp, int64(i))
var int64Val int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
for i, val := range v {
cp.Elem().Index(i).SetInt(int64(val))
}
return Value{
vtype: INT64SLICE,
slice: &cp,
slice: cp.Elem().Interface(),
}
}

@ -101,12 +99,7 @@ func Int64Value(v int64) Value {

// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
cp := make([]int64, len(v))
copy(cp, v)
return Value{
vtype: INT64SLICE,
slice: &cp,
}
return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)}
}

// Float64Value creates a FLOAT64 Value.
@ -119,12 +112,7 @@ func Float64Value(v float64) Value {

// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
cp := make([]float64, len(v))
copy(cp, v)
return Value{
vtype: FLOAT64SLICE,
slice: &cp,
}
return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)}
}

// StringValue creates a STRING Value.
@ -137,12 +125,7 @@ func StringValue(v string) Value {

// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
cp := make([]string, len(v))
copy(cp, v)
return Value{
vtype: STRINGSLICE,
slice: &cp,
}
return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)}
}

// Type returns a type of the Value.
@ -159,10 +142,7 @@ func (v Value) AsBool() bool {
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
if s, ok := v.slice.(*[]bool); ok {
return *s
}
return nil
return attribute.AsSlice[bool](v.slice)
}

// AsInt64 returns the int64 value. Make sure that the Value's type is
@ -174,10 +154,7 @@ func (v Value) AsInt64() int64 {
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
if s, ok := v.slice.(*[]int64); ok {
return *s
}
return nil
return attribute.AsSlice[int64](v.slice)
}

// AsFloat64 returns the float64 value. Make sure that the Value's
@ -189,10 +166,7 @@ func (v Value) AsFloat64() float64 {
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
if s, ok := v.slice.(*[]float64); ok {
return *s
}
return nil
return attribute.AsSlice[float64](v.slice)
}

// AsString returns the string value. Make sure that the Value's type
@ -204,10 +178,7 @@ func (v Value) AsString() string {
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
if s, ok := v.slice.(*[]string); ok {
return *s
}
return nil
return attribute.AsSlice[string](v.slice)
}

type unknownValueType struct{}
@ -239,19 +210,19 @@ func (v Value) AsInterface() interface{} {
func (v Value) Emit() string {
switch v.Type() {
case BOOLSLICE:
return fmt.Sprint(*(v.slice.(*[]bool)))
return fmt.Sprint(v.AsBoolSlice())
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
return fmt.Sprint(*(v.slice.(*[]int64)))
return fmt.Sprint(v.AsInt64Slice())
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
return fmt.Sprint(*(v.slice.(*[]float64)))
return fmt.Sprint(v.AsFloat64Slice())
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
return fmt.Sprint(*(v.slice.(*[]string)))
return fmt.Sprint(v.AsStringSlice())
case STRING:
return v.stringly
default:
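From the public API side, the slice refactor above is invisible; a quick sketch of the slice helpers it backs (attribute keys and values are mine):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// IntSliceValue now stores the data in an internal array-backed value
	// instead of a *[]int64, but the accessors keep returning plain slices.
	ids := attribute.IntSlice("request.ids", []int{1, 2, 3})
	fmt.Println(ids.Value.AsInt64Slice()) // [1 2 3]
	fmt.Println(ids.Value.Emit())         // [1 2 3]

	// Slice attributes are now comparable by value (#3108, #3252), so two
	// equal slices produce equal attribute values.
	a := attribute.StringSlice("tags", []string{"x", "y"})
	b := attribute.StringSlice("tags", []string{"x", "y"})
	fmt.Println(a.Value == b.Value) // true
}
```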
Some files were not shown because too many files have changed in this diff.