mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-09 16:00:22 +00:00)

rebase: bump github.com/hashicorp/vault/api from 1.8.3 to 1.9.0

Bumps [github.com/hashicorp/vault/api](https://github.com/hashicorp/vault) from 1.8.3 to 1.9.0.
- [Release notes](https://github.com/hashicorp/vault/releases)
- [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/vault/compare/v1.8.3...v1.9.0)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/vault/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

This commit is contained in: parent 3dd6fb40f1, commit cb74487f75
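Context note (not part of the commit): a Dependabot bump like this is roughly what running `go get github.com/hashicorp/vault/api@v1.9.0` followed by `go mod tidy` and `go mod vendor` produces, which is why the diff below updates go.mod and go.sum and prunes vendored packages (for example github.com/armon/go-metrics) that drop out of the module graph. The sketch below is likewise only illustrative and is not ceph-csi code: it shows a minimal use of the client package being bumped, with a placeholder token and secret path; a semver-minor update such as 1.8.3 to 1.9.0 is expected to keep calls like these source-compatible.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and related environment variables.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatalf("creating vault client: %v", err)
	}
	client.SetToken("dev-only-token") // placeholder token for the example

	// Read a secret from a KV v2 mount; the path is a placeholder.
	secret, err := client.Logical().Read("secret/data/example")
	if err != nil {
		log.Fatalf("reading secret: %v", err)
	}
	if secret == nil {
		log.Println("no secret found at path")
		return
	}
	fmt.Println(secret.Data)
}
```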
go.mod (15 changed lines)

@@ -18,7 +18,7 @@ require (
 github.com/google/uuid v1.3.0
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-github.com/hashicorp/vault/api v1.8.3
+github.com/hashicorp/vault/api v1.9.0
 github.com/kubernetes-csi/csi-lib-utils v0.11.0
 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
 github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
@@ -50,8 +50,6 @@ require (
 require (
 github.com/ansel1/merry v1.6.2 // indirect
 github.com/ansel1/merry/v2 v2.0.1 // indirect
-github.com/armon/go-metrics v0.3.9 // indirect
-github.com/armon/go-radix v1.0.0 // indirect
 github.com/aws/aws-sdk-go-v2 v1.17.4 // indirect
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
@@ -88,22 +86,15 @@ require (
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 github.com/hashicorp/go-hclog v0.16.2 // indirect
-github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 github.com/hashicorp/go-multierror v1.1.1 // indirect
-github.com/hashicorp/go-plugin v1.4.5 // indirect
 github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
 github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
 github.com/hashicorp/go-sockaddr v1.0.2 // indirect
-github.com/hashicorp/go-uuid v1.0.2 // indirect
-github.com/hashicorp/go-version v1.2.0 // indirect
-github.com/hashicorp/golang-lru v0.5.4 // indirect
 github.com/hashicorp/hcl v1.0.0 // indirect
 github.com/hashicorp/vault v1.4.2 // indirect
 github.com/hashicorp/vault/sdk v0.7.0 // indirect
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
 github.com/imdario/mergo v0.3.12 // indirect
 github.com/inconshreveable/mousetrap v1.0.1 // indirect
 github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -115,17 +106,13 @@ require (
 github.com/mattn/go-isatty v0.0.16 // indirect
 github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-github.com/mitchellh/copystructure v1.0.0 // indirect
 github.com/mitchellh/go-homedir v1.1.0 // indirect
-github.com/mitchellh/go-testing-interface v1.0.0 // indirect
 github.com/mitchellh/mapstructure v1.5.0 // indirect
-github.com/mitchellh/reflectwalk v1.0.1 // indirect
 github.com/moby/spdystream v0.2.0 // indirect
 github.com/moby/sys/mountinfo v0.6.2 // indirect
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-github.com/oklog/run v1.0.0 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
 github.com/opencontainers/selinux v1.10.0 // indirect
 github.com/openshift/api v0.0.0-20210927171657-636513e97fda // indirect
go.sum (15 changed lines)

@@ -120,7 +120,6 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQh
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
 github.com/armon/go-metrics v0.3.1/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
-github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g=
 github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -289,7 +288,6 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -297,7 +295,7 @@ github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
-github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
+github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
@@ -537,7 +535,6 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTrQoGIPry2Ew=
 github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs=
 github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs=
@@ -555,7 +552,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
 github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
 github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
 github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo=
-github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
@@ -570,7 +566,6 @@ github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5O
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 h1:6KMBnfEv0/kLAz0O76sliN5mXbCDcLfs2kP7ssP7+DQ=
 github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
 github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
@@ -596,7 +591,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -640,8 +634,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
 github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
 github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
 github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
-github.com/hashicorp/vault/api v1.8.3 h1:cHQOLcMhBR+aVI0HzhPxO62w2+gJhIrKguQNONPzu6o=
-github.com/hashicorp/vault/api v1.8.3/go.mod h1:4g/9lj9lmuJQMtT6CmVMHC5FW1yENaVv+Nv4ZfG8fAg=
+github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8=
+github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM=
 github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
 github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
 github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@@ -681,7 +675,6 @@ github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2
 github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=
 github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -865,7 +858,6 @@ github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnh
 github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
 github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@@ -1000,7 +992,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
vendor/github.com/armon/go-metrics/.gitignore (26 deleted lines, generated, vendored)

@@ -1,26 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-/metrics.out
-
-.idea
vendor/github.com/armon/go-metrics/.travis.yml (13 deleted lines, generated, vendored)

@@ -1,13 +0,0 @@
-language: go
-
-go:
-  - "1.x"
-
-env:
-  - GO111MODULE=on
-
-install:
-  - go get ./...
-
-script:
-  - go test ./...
vendor/github.com/armon/go-metrics/LICENSE (20 deleted lines, generated, vendored)

@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Armon Dadgar
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/armon/go-metrics/README.md (91 deleted lines, generated, vendored)

@@ -1,91 +0,0 @@
-go-metrics
-==========
-
-This library provides a `metrics` package which can be used to instrument code,
-expose application metrics, and profile runtime performance in a flexible manner.
-
-Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
-
-Sinks
------
-
-The `metrics` package makes use of a `MetricSink` interface to support delivery
-to any type of backend. Currently the following sinks are provided:
-
-* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
-* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
-* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
-* InmemSink : Provides in-memory aggregation, can be used to export stats
-* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
-* BlackholeSink : Sinks to nowhere
-
-In addition to the sinks, the `InmemSignal` can be used to catch a signal,
-and dump a formatted output of recent metrics. For example, when a process gets
-a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
-
-Labels
-------
-
-Most metrics do have an equivalent ending with `WithLabels`, such methods
-allow to push metrics with labels and use some features of underlying Sinks
-(ex: translated into Prometheus labels).
-
-Since some of these labels may increase greatly cardinality of metrics, the
-library allow to filter labels using a blacklist/whitelist filtering system
-which is global to all metrics.
-
-* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default.
-* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks.
-
-By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
-no tags are filetered at all, but it allow to a user to globally block some tags with high
-cardinality at application level.
-
-Examples
---------
-
-Here is an example of using the package:
-
-```go
-func SlowMethod() {
-    // Profiling the runtime of a method
-    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
-}
-
-// Configure a statsite sink as the global metrics sink
-sink, _ := metrics.NewStatsiteSink("statsite:8125")
-metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
-
-// Emit a Key/Value pair
-metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
-```
-
-Here is an example of setting up a signal handler:
-
-```go
-// Setup the inmem sink and signal handler
-inm := metrics.NewInmemSink(10*time.Second, time.Minute)
-sig := metrics.DefaultInmemSignal(inm)
-metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
-
-// Run some code
-inm.SetGauge([]string{"foo"}, 42)
-inm.EmitKey([]string{"bar"}, 30)
-
-inm.IncrCounter([]string{"baz"}, 42)
-inm.IncrCounter([]string{"baz"}, 1)
-inm.IncrCounter([]string{"baz"}, 80)
-
-inm.AddSample([]string{"method", "wow"}, 42)
-inm.AddSample([]string{"method", "wow"}, 100)
-inm.AddSample([]string{"method", "wow"}, 22)
-
-....
-```
-
-When a signal comes in, output like the following will be dumped to stderr:
-
-    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
-    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
-    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
-    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
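Aside (not part of the diff): the Labels section of the removed README above describes whitelist/blacklist label filtering without showing code. The sketch below illustrates it using only APIs that appear in this commit's removed sources (NewInmemSink, DefaultConfig, NewGlobal, UpdateFilterAndLabels, IncrCounterWithLabels, Label); the service name and label names are placeholders, and this code is not part of ceph-csi.

```go
package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// In-memory sink, as in the README examples above.
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)

	m, err := metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
	if err != nil {
		panic(err)
	}

	// No prefix filters (first two arguments); whitelist the "method" label
	// and blacklist the high-cardinality "request_id" label.
	m.UpdateFilterAndLabels(nil, nil, []string{"method"}, []string{"request_id"})

	// Only the "method" label survives filtering on this counter.
	m.IncrCounterWithLabels([]string{"requests"}, 1, []metrics.Label{
		{Name: "method", Value: "GET"},
		{Name: "request_id", Value: "abc123"}, // filtered out before reaching the sink
	})
}
```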
vendor/github.com/armon/go-metrics/const_unix.go (12 deleted lines, generated, vendored)

@@ -1,12 +0,0 @@
-// +build !windows
-
-package metrics
-
-import (
-    "syscall"
-)
-
-const (
-    // DefaultSignal is used with DefaultInmemSignal
-    DefaultSignal = syscall.SIGUSR1
-)
vendor/github.com/armon/go-metrics/const_windows.go (13 deleted lines, generated, vendored)

@@ -1,13 +0,0 @@
-// +build windows
-
-package metrics
-
-import (
-    "syscall"
-)
-
-const (
-    // DefaultSignal is used with DefaultInmemSignal
-    // Windows has no SIGUSR1, use SIGBREAK
-    DefaultSignal = syscall.Signal(21)
-)
vendor/github.com/armon/go-metrics/inmem.go (339 deleted lines, generated, vendored)

@@ -1,339 +0,0 @@
[Vendored source deleted in full. The file implemented the in-memory sink: the InmemSink and IntervalMetrics types, NewIntervalMetrics, the AggregateSample type with its Stddev, Mean, Ingest and String methods, NewInmemSinkFromURL, NewInmemSink, the SetGauge/EmitKey/IncrCounter/AddSample methods and their *WithLabels variants, Data, getInterval, flattenKey and flattenKeyLabels.]
vendor/github.com/armon/go-metrics/inmem_endpoint.go (162 deleted lines, generated, vendored)

@@ -1,162 +0,0 @@
[Vendored source deleted in full. The file implemented the HTTP display support for the in-memory sink: MetricsSummary, GaugeValue, PointValue and SampledValue (with deepCopy), DisplayMetrics, newMetricSummaryFromInterval, formatSamples, the Encoder interface and Stream.]
vendor/github.com/armon/go-metrics/inmem_signal.go (117 deleted lines, generated, vendored)

@@ -1,117 +0,0 @@
[Vendored source deleted in full. The file implemented InmemSignal, which listens for a signal (SIGUSR1 by default, SIGBREAK on Windows) and dumps recent metrics from an InmemSink to a writer: NewInmemSignal, DefaultInmemSignal, Stop, run, dumpStats and flattenLabels.]
293  vendor/github.com/armon/go-metrics/metrics.go  generated  vendored
@@ -1,293 +0,0 @@
[293 lines deleted: the go-metrics Metrics methods (SetGauge, EmitKey, IncrCounter, AddSample, MeasureSince and their *WithLabels variants), prefix/label allow-block filtering, runtime stats emission, and the insert helper.]
115  vendor/github.com/armon/go-metrics/sink.go  generated  vendored
@@ -1,115 +0,0 @@
[115 lines deleted: the MetricSink interface, BlackholeSink, FanoutSink, and NewMetricSinkFromURL with its statsd/statsite/inmem sink registry.]
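
For context only: go-metrics was pulled in transitively by the Vault API client and is dropped with this bump. A minimal sketch, using only identifiers visible in the deleted files (NewMetricSinkFromURL, DefaultConfig, NewGlobal, IncrCounter), of how these sinks were typically wired together; illustrative, not part of the ceph-csi code base.

```go
package main

import (
	"log"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Build a sink from a URL; the scheme selects the factory from the
	// sink registry ("statsd", "statsite" or "inmem").
	sink, err := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}

	// Install it as the shared global instance with the default configuration.
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("example"), sink); err != nil {
		log.Fatal(err)
	}

	// Package-level helpers proxy to the global instance.
	metrics.IncrCounter([]string{"requests"}, 1)
}
```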
146  vendor/github.com/armon/go-metrics/start.go  generated  vendored
@@ -1,146 +0,0 @@
[146 lines deleted: the Config and Metrics types, the shared global instance with Default, New, NewGlobal and DefaultConfig, and the package-level proxy functions.]
184  vendor/github.com/armon/go-metrics/statsd.go  generated  vendored
@@ -1,184 +0,0 @@
[184 lines deleted: StatsdSink, a UDP statsd MetricSink with key flattening, a non-blocking metric queue, and a reconnecting flush loop.]
172  vendor/github.com/armon/go-metrics/statsite.go  generated  vendored
@@ -1,172 +0,0 @@
[172 lines deleted: StatsiteSink, the TCP statsite counterpart to StatsdSink, writing through a bufio.Writer with periodic flushing.]
22  vendor/github.com/armon/go-radix/.gitignore  generated  vendored
@@ -1,22 +0,0 @@
[22 lines deleted: a standard Go .gitignore (compiled objects, cgo artifacts, test binaries).]
3  vendor/github.com/armon/go-radix/.travis.yml  generated  vendored
@@ -1,3 +0,0 @@
language: go
go:
  - tip
20  vendor/github.com/armon/go-radix/LICENSE  generated  vendored
@@ -1,20 +0,0 @@
[20 lines deleted: the MIT license text, Copyright (c) 2014 Armon Dadgar.]
38  vendor/github.com/armon/go-radix/README.md  generated  vendored
@@ -1,38 +0,0 @@
[38 lines deleted: the go-radix README, with the package description, radix-tree properties (O(k) operations, minimum/maximum lookups, ordered iteration), Godoc link, and a short usage example.]
540  vendor/github.com/armon/go-radix/radix.go  generated  vendored
@@ -1,540 +0,0 @@
[540 lines deleted: the mutable radix tree implementation, including the node and edge types, Insert, Delete, DeletePrefix, Get, LongestPrefix, Minimum, Maximum, and the Walk/WalkPrefix/WalkPath traversals.]
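
For context only: a minimal sketch of the API the removed radix.go exposed (New, Insert, LongestPrefix, WalkPrefix, as seen in the deleted source); illustrative, not part of the ceph-csi code base.

```go
package main

import (
	"fmt"

	radix "github.com/armon/go-radix"
)

func main() {
	// The removed package exposed a mutable radix tree keyed by string.
	t := radix.New()
	t.Insert("foo", 1)
	t.Insert("foobar", 2)

	// Longest-prefix match over the inserted keys.
	if key, val, ok := t.LongestPrefix("foozip"); ok {
		fmt.Println(key, val) // foo 1
	}

	// Ordered walk of every entry under a prefix.
	t.WalkPrefix("foo", func(k string, v interface{}) bool {
		fmt.Println(k, v)
		return false
	})
}
```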
62  vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go  generated  vendored
@@ -1,62 +0,0 @@
[62 lines deleted: protoc-generated alias code that re-exports google.golang.org/protobuf's emptypb.Empty type.]
24  vendor/github.com/hashicorp/go-immutable-radix/.gitignore  generated  vendored
@@ -1,24 +0,0 @@
[24 lines deleted: a standard Go .gitignore (compiled objects, cgo artifacts, test and profiling output).]
23  vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md  generated  vendored
@@ -1,23 +0,0 @@
[23 lines deleted: the go-immutable-radix changelog covering releases 1.0.0 (August 30th, 2018) through 1.3.0 (September 17th, 2020).]
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
@ -1,363 +0,0 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. "Contributor"

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. "Contributor Version"

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.

1.3. "Contribution"

means Covered Software of a particular Contributor.

1.4. "Covered Software"

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. "Incompatible With Secondary Licenses"
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.

1.6. "Executable Form"

means any form of the work other than Source Code Form.

1.7. "Larger Work"

means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.

1.8. "License"

means this document.

1.9. "Licensable"

means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.

1.10. "Modifications"

means any of the following:

a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor

means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License"

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form"

means the form of the work preferred for making modifications.

1.14. "You" (or "Your")

means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
66
vendor/github.com/hashicorp/go-immutable-radix/README.md
generated
vendored
@ -1,66 +0,0 @@
go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
=========

Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.

As a radix tree, it provides the following:
 * O(k) operations. In many cases, this can be faster than a hash table since
   the hash function is an O(k) operation, and hash tables have very poor cache locality.
 * Minimum / Maximum value lookups
 * Ordered iteration

A tree supports using a transaction to batch multiple updates (insert, delete)
in a more efficient manner than performing each operation one at a time.

For a mutable variant, see [go-radix](https://github.com/armon/go-radix).

Documentation
=============

The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).

Example
=======

Below is a simple example of usage

```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("foo"), 1)
r, _, _ = r.Insert([]byte("bar"), 2)
r, _, _ = r.Insert([]byte("foobar"), 2)

// Find the longest prefix match
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
if string(m) != "foo" {
    panic("should be foo")
}
```

Here is an example of performing a range scan of the keys.

```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("001"), 1)
r, _, _ = r.Insert([]byte("002"), 2)
r, _, _ = r.Insert([]byte("005"), 5)
r, _, _ = r.Insert([]byte("010"), 10)
r, _, _ = r.Insert([]byte("100"), 10)

// Range scan over the keys that sort lexicographically between [003, 050)
it := r.Root().Iterator()
it.SeekLowerBound([]byte("003"))
for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
  if string(key) >= "050" {
      break
  }
  fmt.Println(string(key))
}
// Output:
// 005
// 010
```
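Beyond the README examples above, the package also exposes watch channels (`GetWatch` on a node, `TrackMutate`/`Notify` on a transaction, both visible in the sources below). A minimal sketch, assuming the same import alias as the README:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r, _, _ := iradix.New().Insert([]byte("foo"), 1)

	// The watch channel is closed once a committed transaction mutates "foo".
	watch, val, _ := r.Root().GetWatch([]byte("foo"))
	fmt.Println(val) // 1

	txn := r.Txn()
	txn.TrackMutate(true)
	txn.Insert([]byte("foo"), 2)
	txn.Commit() // TrackMutate is on, so Commit also issues notifications

	<-watch // returns because the tracked leaf channel was closed
	fmt.Println("foo changed")
}
```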
21
vendor/github.com/hashicorp/go-immutable-radix/edges.go
generated
vendored
@ -1,21 +0,0 @@
package iradix

import "sort"

type edges []edge

func (e edges) Len() int {
	return len(e)
}

func (e edges) Less(i, j int) bool {
	return e[i].label < e[j].label
}

func (e edges) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

func (e edges) Sort() {
	sort.Sort(e)
}
676
vendor/github.com/hashicorp/go-immutable-radix/iradix.go
generated
vendored
@ -1,676 +0,0 @@
package iradix

import (
	"bytes"
	"strings"

	"github.com/hashicorp/golang-lru/simplelru"
)

const (
	// defaultModifiedCache is the default size of the modified node
	// cache used per transaction. This is used to cache the updates
	// to the nodes near the root, while the leaves do not need to be
	// cached. This is important for very large transactions to prevent
	// the modified cache from growing to be enormous. This is also used
	// to set the max size of the mutation notify maps since those should
	// also be bounded in a similar way.
	defaultModifiedCache = 8192
)

// Tree implements an immutable radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over a standard
// hash map is prefix-based lookups and ordered iteration. The immutability
// means that it is safe to concurrently read from a Tree without any
// coordination.
type Tree struct {
	root *Node
	size int
}

// New returns an empty Tree
func New() *Tree {
	t := &Tree{
		root: &Node{
			mutateCh: make(chan struct{}),
		},
	}
	return t
}

// Len is used to return the number of elements in the tree
func (t *Tree) Len() int {
	return t.size
}

// Txn is a transaction on the tree. This transaction is applied
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
	// root is the modified root for the transaction.
	root *Node

	// snap is a snapshot of the root node for use if we have to run the
	// slow notify algorithm.
	snap *Node

	// size tracks the size of the tree as it is modified during the
	// transaction.
	size int

	// writable is a cache of writable nodes that have been created during
	// the course of the transaction. This allows us to re-use the same
	// nodes for further writes and avoid unnecessary copies of nodes that
	// have never been exposed outside the transaction. This will only hold
	// up to defaultModifiedCache number of entries.
	writable *simplelru.LRU

	// trackChannels is used to hold channels that need to be notified to
	// signal mutation of the tree. This will only hold up to
	// defaultModifiedCache number of entries, after which we will set the
	// trackOverflow flag, which will cause us to use a more expensive
	// algorithm to perform the notifications. Mutation tracking is only
	// performed if trackMutate is true.
	trackChannels map[chan struct{}]struct{}
	trackOverflow bool
	trackMutate   bool
}

// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
	txn := &Txn{
		root: t.root,
		snap: t.root,
		size: t.size,
	}
	return txn
}

// Clone makes an independent copy of the transaction. The new transaction
// does not track any nodes and has TrackMutate turned off. The cloned
// transaction will contain any uncommitted writes in the original
// transaction, but further mutations to either will be independent and
// result in different radix trees on Commit. A cloned transaction may be
// passed to another goroutine and mutated there independently, however
// each transaction may only be mutated in a single thread.
func (t *Txn) Clone() *Txn {
	// reset the writable node cache to avoid leaking future writes into the clone
	t.writable = nil

	txn := &Txn{
		root: t.root,
		snap: t.snap,
		size: t.size,
	}
	return txn
}

// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
	t.trackMutate = track
}

// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
	// In overflow, make sure we don't store any more objects.
	if t.trackOverflow {
		return
	}

	// If this would overflow the state we reject it and set the flag (since
	// we aren't tracking everything that's required any longer).
	if len(t.trackChannels) >= defaultModifiedCache {
		// Mark that we are in the overflow state
		t.trackOverflow = true

		// Clear the map so that the channels can be garbage collected. It is
		// safe to do this since we have already overflowed and will be using
		// the slow notify algorithm.
		t.trackChannels = nil
		return
	}

	// Create the map on the fly when we need it.
	if t.trackChannels == nil {
		t.trackChannels = make(map[chan struct{}]struct{})
	}

	// Otherwise we are good to track it.
	t.trackChannels[ch] = struct{}{}
}

// writeNode returns a node to be modified, if the current node has already been
// modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
	// Ensure the writable set exists.
	if t.writable == nil {
		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
		if err != nil {
			panic(err)
		}
		t.writable = lru
	}

	// If this node has already been modified, we can continue to use it
	// during this transaction. We know that we don't need to track it for
	// a node update since the node is writable, but if this is for a leaf
	// update we track it, in case the initial write to this node didn't
	// update the leaf.
	if _, ok := t.writable.Get(n); ok {
		if t.trackMutate && forLeafUpdate && n.leaf != nil {
			t.trackChannel(n.leaf.mutateCh)
		}
		return n
	}

	// Mark this node as being mutated.
	if t.trackMutate {
		t.trackChannel(n.mutateCh)
	}

	// Mark its leaf as being mutated, if appropriate.
	if t.trackMutate && forLeafUpdate && n.leaf != nil {
		t.trackChannel(n.leaf.mutateCh)
	}

	// Copy the existing node. If you have set forLeafUpdate it will be
	// safe to replace this leaf with another after you get your node for
	// writing. You MUST replace it, because the channel associated with
	// this leaf will be closed when this transaction is committed.
	nc := &Node{
		mutateCh: make(chan struct{}),
		leaf:     n.leaf,
	}
	if n.prefix != nil {
		nc.prefix = make([]byte, len(n.prefix))
		copy(nc.prefix, n.prefix)
	}
	if len(n.edges) != 0 {
		nc.edges = make([]edge, len(n.edges))
		copy(nc.edges, n.edges)
	}

	// Mark this node as writable.
	t.writable.Add(nc, nil)
	return nc
}

// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
// Returns the size of the subtree visited
func (t *Txn) trackChannelsAndCount(n *Node) int {
	// Count only leaf nodes
	leaves := 0
	if n.leaf != nil {
		leaves = 1
	}
	// Mark this node as being mutated.
	if t.trackMutate {
		t.trackChannel(n.mutateCh)
	}

	// Mark its leaf as being mutated, if appropriate.
	if t.trackMutate && n.leaf != nil {
		t.trackChannel(n.leaf.mutateCh)
	}

	// Recurse on the children
	for _, e := range n.edges {
		leaves += t.trackChannelsAndCount(e.node)
	}
	return leaves
}

// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
	// Mark the child node as being mutated since we are about to abandon
	// it. We don't need to mark the leaf since we are retaining it if it
	// is there.
	e := n.edges[0]
	child := e.node
	if t.trackMutate {
		t.trackChannel(child.mutateCh)
	}

	// Merge the nodes.
	n.prefix = concat(n.prefix, child.prefix)
	n.leaf = child.leaf
	if len(child.edges) != 0 {
		n.edges = make([]edge, len(child.edges))
		copy(n.edges, child.edges)
	} else {
		n.edges = nil
	}
}

// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
	// Handle key exhaustion
	if len(search) == 0 {
		var oldVal interface{}
		didUpdate := false
		if n.isLeaf() {
			oldVal = n.leaf.val
			didUpdate = true
		}

		nc := t.writeNode(n, true)
		nc.leaf = &leafNode{
			mutateCh: make(chan struct{}),
			key:      k,
			val:      v,
		}
		return nc, oldVal, didUpdate
	}

	// Look for the edge
	idx, child := n.getEdge(search[0])

	// No edge, create one
	if child == nil {
		e := edge{
			label: search[0],
			node: &Node{
				mutateCh: make(chan struct{}),
				leaf: &leafNode{
					mutateCh: make(chan struct{}),
					key:      k,
					val:      v,
				},
				prefix: search,
			},
		}
		nc := t.writeNode(n, false)
		nc.addEdge(e)
		return nc, nil, false
	}

	// Determine longest prefix of the search key on match
	commonPrefix := longestPrefix(search, child.prefix)
	if commonPrefix == len(child.prefix) {
		search = search[commonPrefix:]
		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
		if newChild != nil {
			nc := t.writeNode(n, false)
			nc.edges[idx].node = newChild
			return nc, oldVal, didUpdate
		}
		return nil, oldVal, didUpdate
	}

	// Split the node
	nc := t.writeNode(n, false)
	splitNode := &Node{
		mutateCh: make(chan struct{}),
		prefix:   search[:commonPrefix],
	}
	nc.replaceEdge(edge{
		label: search[0],
		node:  splitNode,
	})

	// Restore the existing child node
	modChild := t.writeNode(child, false)
	splitNode.addEdge(edge{
		label: modChild.prefix[commonPrefix],
		node:  modChild,
	})
	modChild.prefix = modChild.prefix[commonPrefix:]

	// Create a new leaf node
	leaf := &leafNode{
		mutateCh: make(chan struct{}),
		key:      k,
		val:      v,
	}

	// If the new key is a subset, add it to this node
	search = search[commonPrefix:]
	if len(search) == 0 {
		splitNode.leaf = leaf
		return nc, nil, false
	}

	// Create a new edge for the node
	splitNode.addEdge(edge{
		label: search[0],
		node: &Node{
			mutateCh: make(chan struct{}),
			leaf:     leaf,
			prefix:   search,
		},
	})
	return nc, nil, false
}

// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
	// Check for key exhaustion
	if len(search) == 0 {
		if !n.isLeaf() {
			return nil, nil
		}
		// Copy the pointer in case we are in a transaction that already
		// modified this node since the node will be reused. Any changes
		// made to the node will not affect returning the original leaf
		// value.
		oldLeaf := n.leaf

		// Remove the leaf node
		nc := t.writeNode(n, true)
		nc.leaf = nil

		// Check if this node should be merged
		if n != t.root && len(nc.edges) == 1 {
			t.mergeChild(nc)
		}
		return nc, oldLeaf
	}

	// Look for an edge
	label := search[0]
	idx, child := n.getEdge(label)
	if child == nil || !bytes.HasPrefix(search, child.prefix) {
		return nil, nil
	}

	// Consume the search prefix
	search = search[len(child.prefix):]
	newChild, leaf := t.delete(n, child, search)
	if newChild == nil {
		return nil, nil
	}

	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
	// so be careful if you change any of the logic here.
	nc := t.writeNode(n, false)

	// Delete the edge if the node has no edges
	if newChild.leaf == nil && len(newChild.edges) == 0 {
		nc.delEdge(label)
		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
			t.mergeChild(nc)
		}
	} else {
		nc.edges[idx].node = newChild
	}
	return nc, leaf
}

// deletePrefix does a recursive deletion of everything under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
	// Check for key exhaustion
	if len(search) == 0 {
		nc := t.writeNode(n, true)
		if n.isLeaf() {
			nc.leaf = nil
		}
		nc.edges = nil
		return nc, t.trackChannelsAndCount(n)
	}

	// Look for an edge
	label := search[0]
	idx, child := n.getEdge(label)
	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
		return nil, 0
	}

	// Consume the search prefix
	if len(child.prefix) > len(search) {
		search = []byte("")
	} else {
		search = search[len(child.prefix):]
	}
	newChild, numDeletions := t.deletePrefix(n, child, search)
	if newChild == nil {
		return nil, 0
	}
	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
	// so be careful if you change any of the logic here.

	nc := t.writeNode(n, false)

	// Delete the edge if the node has no edges
	if newChild.leaf == nil && len(newChild.edges) == 0 {
		nc.delEdge(label)
		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
			t.mergeChild(nc)
		}
	} else {
		nc.edges[idx].node = newChild
	}
	return nc, numDeletions
}

// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
	if newRoot != nil {
		t.root = newRoot
	}
	if !didUpdate {
		t.size++
	}
	return oldVal, didUpdate
}

// Delete is used to delete a given key. Returns the old value if any,
// and a bool indicating if the key was set.
func (t *Txn) Delete(k []byte) (interface{}, bool) {
	newRoot, leaf := t.delete(nil, t.root, k)
	if newRoot != nil {
		t.root = newRoot
	}
	if leaf != nil {
		t.size--
		return leaf.val, true
	}
	return nil, false
}

// DeletePrefix is used to delete an entire subtree that matches the prefix
// This will delete all nodes under that prefix
func (t *Txn) DeletePrefix(prefix []byte) bool {
	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
	if newRoot != nil {
		t.root = newRoot
		t.size = t.size - numDeletions
		return true
	}
	return false
}

// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
func (t *Txn) Root() *Node {
	return t.root
}

// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Txn) Get(k []byte) (interface{}, bool) {
	return t.root.Get(k)
}

// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
	return t.root.GetWatch(k)
}

// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
	nt := t.CommitOnly()
	if t.trackMutate {
		t.Notify()
	}
	return nt
}

// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
	nt := &Tree{t.root, t.size}
	t.writable = nil
	return nt
}

// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
	snapIter := t.snap.rawIterator()
	rootIter := t.root.rawIterator()
	for snapIter.Front() != nil || rootIter.Front() != nil {
		// If we've exhausted the nodes in the old snapshot, we know
		// there's nothing remaining to notify.
		if snapIter.Front() == nil {
			return
		}
		snapElem := snapIter.Front()

		// If we've exhausted the nodes in the new root, we know we need
		// to invalidate everything that remains in the old snapshot. We
		// know from the loop condition there's something in the old
		// snapshot.
		if rootIter.Front() == nil {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// Do one string compare so we can check the various conditions
		// below without repeating the compare.
		cmp := strings.Compare(snapIter.Path(), rootIter.Path())

		// If the snapshot is behind the root, then we must have deleted
		// this node during the transaction.
		if cmp < 0 {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// If the snapshot is ahead of the root, then we must have added
		// this node during the transaction.
		if cmp > 0 {
			rootIter.Next()
			continue
		}

		// If we have the same path, then we need to see if we mutated a
		// node and possibly the leaf.
		rootElem := rootIter.Front()
		if snapElem != rootElem {
			close(snapElem.mutateCh)
			if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
				close(snapElem.leaf.mutateCh)
			}
		}
		snapIter.Next()
		rootIter.Next()
	}
}

// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
	if !t.trackMutate {
		return
	}

	// If we've overflowed the tracking state we can't use it in any way and
	// need to do a full tree compare.
	if t.trackOverflow {
		t.slowNotify()
	} else {
		for ch := range t.trackChannels {
			close(ch)
		}
	}

	// Clean up the tracking state so that a re-notify is safe (will trigger
	// the else clause above which will be a no-op).
	t.trackChannels = nil
	t.trackOverflow = false
}

// Insert is used to add or update a given key. The return provides
// the new tree, previous value and a bool indicating if any was set.
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
	txn := t.Txn()
	old, ok := txn.Insert(k, v)
	return txn.Commit(), old, ok
}

// Delete is used to delete a given key. Returns the new tree,
// old value if any, and a bool indicating if the key was set.
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
	txn := t.Txn()
	old, ok := txn.Delete(k)
	return txn.Commit(), old, ok
}

// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
	txn := t.Txn()
	ok := txn.DeletePrefix(k)
	return txn.Commit(), ok
}

// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {
	return t.root
}

// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(k []byte) (interface{}, bool) {
	return t.root.Get(k)
}

// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 []byte) int {
	max := len(k1)
	if l := len(k2); l < max {
		max = l
	}
	var i int
	for i = 0; i < max; i++ {
		if k1[i] != k2[i] {
			break
		}
	}
	return i
}

// concat two byte slices, returning a third new copy
func concat(a, b []byte) []byte {
	c := make([]byte, len(a)+len(b))
	copy(c, a)
	copy(c[len(a):], b)
	return c
}
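The `Txn` type above is what makes batched updates cheap: nodes copied during the transaction are reused for later writes and only turned into a new immutable root on `Commit`. A short usage sketch (illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()

	// Batch several updates in one transaction; r itself is never modified.
	txn := r.Txn()
	for i, k := range []string{"alpha", "beta", "gamma"} {
		txn.Insert([]byte(k), i)
	}
	txn.Delete([]byte("beta"))
	r2 := txn.Commit()

	fmt.Println(r.Len(), r2.Len()) // 0 2
}
```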
205
vendor/github.com/hashicorp/go-immutable-radix/iter.go
generated
vendored
@ -1,205 +0,0 @@
package iradix

import (
	"bytes"
)

// Iterator is used to iterate over a set of nodes
// in pre-order
type Iterator struct {
	node  *Node
	stack []edges
}

// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
	// Wipe the stack
	i.stack = nil
	n := i.node
	watch = n.mutateCh
	search := prefix
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			i.node = n
			return
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			i.node = nil
			return
		}

		// Update to the finest granularity as the search makes progress
		watch = n.mutateCh

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]

		} else if bytes.HasPrefix(n.prefix, search) {
			i.node = n
			return
		} else {
			i.node = nil
			return
		}
	}
}

// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
	i.SeekPrefixWatch(prefix)
}

func (i *Iterator) recurseMin(n *Node) *Node {
	// Traverse to the minimum child
	if n.leaf != nil {
		return n
	}
	nEdges := len(n.edges)
	if nEdges > 1 {
		// Add all the other edges to the stack (the min node will be added as
		// we recurse)
		i.stack = append(i.stack, n.edges[1:])
	}
	if nEdges > 0 {
		return i.recurseMin(n.edges[0].node)
	}
	// Shouldn't be possible
	return nil
}

// SeekLowerBound is used to seek the iterator to the smallest key that is
// greater or equal to the given key. There is no watch variant as it's hard to
// predict based on the radix structure which node(s) changes might affect the
// result.
func (i *Iterator) SeekLowerBound(key []byte) {
	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
	// go because we need only a subset of edges of many nodes in the path to the
	// leaf with the lower bound. Note that the iterator will still recurse into
	// children that we don't traverse on the way to the reverse lower bound as it
	// walks the stack.
	i.stack = []edges{}
	// i.node starts off in the common case as pointing to the root node of the
	// tree. By the time we return we have either found a lower bound and setup
	// the stack to traverse all larger keys, or we have not and the stack and
	// node should both be nil to prevent the iterator from assuming it is just
	// iterating the whole tree from the root node. Either way this needs to end
	// up as nil so just set it here.
	n := i.node
	i.node = nil
	search := key

	found := func(n *Node) {
		i.stack = append(i.stack, edges{edge{node: n}})
	}

	findMin := func(n *Node) {
		n = i.recurseMin(n)
		if n != nil {
			found(n)
			return
		}
	}

	for {
		// Compare current prefix with the search key's same-length prefix.
		var prefixCmp int
		if len(n.prefix) < len(search) {
			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
		} else {
			prefixCmp = bytes.Compare(n.prefix, search)
		}

		if prefixCmp > 0 {
			// Prefix is larger, that means the lower bound is greater than the search
			// and from now on we need to follow the minimum path to the smallest
			// leaf under this subtree.
			findMin(n)
			return
		}

		if prefixCmp < 0 {
			// Prefix is smaller than search prefix, that means there is no lower
			// bound
			i.node = nil
			return
		}

		// Prefix is equal, we are still heading for an exact match. If this is a
		// leaf and an exact match we're done.
		if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
			found(n)
			return
		}

		// Consume the search prefix if the current node has one. Note that this is
		// safe because if n.prefix is longer than the search slice prefixCmp would
		// have been > 0 above and the method would have already returned.
		search = search[len(n.prefix):]

		if len(search) == 0 {
			// We've exhausted the search key, but the current node is not an exact
			// match or not a leaf. That means that the leaf value if it exists, and
			// all child nodes must be strictly greater, the smallest key in this
			// subtree must be the lower bound.
			findMin(n)
			return
		}

		// Otherwise, take the lower bound next edge.
		idx, lbNode := n.getLowerBoundEdge(search[0])
		if lbNode == nil {
			return
		}

		// Create stack edges for the all strictly higher edges in this node.
		if idx+1 < len(n.edges) {
			i.stack = append(i.stack, n.edges[idx+1:])
		}

		// Recurse
		n = lbNode
	}
}

// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
	// Initialize our stack if needed
	if i.stack == nil && i.node != nil {
		i.stack = []edges{
			{
				edge{node: i.node},
			},
		}
	}

	for len(i.stack) > 0 {
		// Inspect the last element of the stack
		n := len(i.stack)
		last := i.stack[n-1]
		elem := last[0].node

		// Update the stack
		if len(last) > 1 {
			i.stack[n-1] = last[1:]
		} else {
			i.stack = i.stack[:n-1]
		}

		// Push the edges onto the frontier
		if len(elem.edges) > 0 {
			i.stack = append(i.stack, elem.edges)
		}

		// Return the leaf values if any
		if elem.leaf != nil {
			return elem.leaf.key, elem.leaf.val, true
		}
	}
	return nil, nil, false
}
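In addition to the `SeekLowerBound` range scans shown in the README, the iterator above supports prefix-restricted iteration via `SeekPrefix`. A minimal sketch (illustrative, not part of this commit):

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("user/alice"), 1)
	r, _, _ = r.Insert([]byte("user/bob"), 2)
	r, _, _ = r.Insert([]byte("group/admins"), 3)

	// Pre-order walk limited to keys under the "user/" prefix.
	it := r.Root().Iterator()
	it.SeekPrefix([]byte("user/"))
	for key, val, ok := it.Next(); ok; key, val, ok = it.Next() {
		fmt.Printf("%s=%v\n", key, val)
	}
	// user/alice=1
	// user/bob=2
}
```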
334
vendor/github.com/hashicorp/go-immutable-radix/node.go
generated
vendored
@ -1,334 +0,0 @@
package iradix

import (
	"bytes"
	"sort"
)

// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(k []byte, v interface{}) bool

// leafNode is used to represent a value
type leafNode struct {
	mutateCh chan struct{}
	key      []byte
	val      interface{}
}

// edge is used to represent an edge node
type edge struct {
	label byte
	node  *Node
}

// Node is an immutable node in the radix tree
type Node struct {
	// mutateCh is closed if this node is modified
	mutateCh chan struct{}

	// leaf is used to store possible leaf
	leaf *leafNode

	// prefix is the common prefix we ignore
	prefix []byte

	// Edges should be stored in-order for iteration.
	// We avoid a fully materialized slice to save memory,
	// since in most cases we expect to be sparse
	edges edges
}

func (n *Node) isLeaf() bool {
	return n.leaf != nil
}

func (n *Node) addEdge(e edge) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= e.label
	})
	n.edges = append(n.edges, e)
	if idx != num {
		copy(n.edges[idx+1:], n.edges[idx:num])
		n.edges[idx] = e
	}
}

func (n *Node) replaceEdge(e edge) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= e.label
	})
	if idx < num && n.edges[idx].label == e.label {
		n.edges[idx].node = e.node
		return
	}
	panic("replacing missing edge")
}

func (n *Node) getEdge(label byte) (int, *Node) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= label
	})
	if idx < num && n.edges[idx].label == label {
		return idx, n.edges[idx].node
	}
	return -1, nil
}

func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= label
	})
	// we want lower bound behavior so return even if it's not an exact match
	if idx < num {
		return idx, n.edges[idx].node
	}
	return -1, nil
}

func (n *Node) delEdge(label byte) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= label
	})
	if idx < num && n.edges[idx].label == label {
		copy(n.edges[idx:], n.edges[idx+1:])
		n.edges[len(n.edges)-1] = edge{}
		n.edges = n.edges[:len(n.edges)-1]
	}
}

func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
	search := k
	watch := n.mutateCh
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			if n.isLeaf() {
				return n.leaf.mutateCh, n.leaf.val, true
			}
			break
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Update to the finest granularity as the search makes progress
		watch = n.mutateCh

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
	return watch, nil, false
}

func (n *Node) Get(k []byte) (interface{}, bool) {
	_, val, ok := n.GetWatch(k)
	return val, ok
}

// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
	var last *leafNode
	search := k
	for {
		// Look for a leaf node
		if n.isLeaf() {
			last = n.leaf
		}

		// Check for key exhaustion
		if len(search) == 0 {
			break
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
}
|
|
||||||
if last != nil {
|
|
||||||
return last.key, last.val, true
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum is used to return the minimum value in the tree
|
|
||||||
func (n *Node) Minimum() ([]byte, interface{}, bool) {
|
|
||||||
for {
|
|
||||||
if n.isLeaf() {
|
|
||||||
return n.leaf.key, n.leaf.val, true
|
|
||||||
}
|
|
||||||
if len(n.edges) > 0 {
|
|
||||||
n = n.edges[0].node
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maximum is used to return the maximum value in the tree
|
|
||||||
func (n *Node) Maximum() ([]byte, interface{}, bool) {
|
|
||||||
for {
|
|
||||||
if num := len(n.edges); num > 0 {
|
|
||||||
n = n.edges[num-1].node
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if n.isLeaf() {
|
|
||||||
return n.leaf.key, n.leaf.val, true
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterator is used to return an iterator at
|
|
||||||
// the given node to walk the tree
|
|
||||||
func (n *Node) Iterator() *Iterator {
|
|
||||||
return &Iterator{node: n}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReverseIterator is used to return an iterator at
|
|
||||||
// the given node to walk the tree backwards
|
|
||||||
func (n *Node) ReverseIterator() *ReverseIterator {
|
|
||||||
return NewReverseIterator(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawIterator is used to return a raw iterator at the given node to walk the
|
|
||||||
// tree.
|
|
||||||
func (n *Node) rawIterator() *rawIterator {
|
|
||||||
iter := &rawIterator{node: n}
|
|
||||||
iter.Next()
|
|
||||||
return iter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk is used to walk the tree
|
|
||||||
func (n *Node) Walk(fn WalkFn) {
|
|
||||||
recursiveWalk(n, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkBackwards is used to walk the tree in reverse order
|
|
||||||
func (n *Node) WalkBackwards(fn WalkFn) {
|
|
||||||
reverseRecursiveWalk(n, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkPrefix is used to walk the tree under a prefix
|
|
||||||
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
|
|
||||||
search := prefix
|
|
||||||
for {
|
|
||||||
// Check for key exhaution
|
|
||||||
if len(search) == 0 {
|
|
||||||
recursiveWalk(n, fn)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for an edge
|
|
||||||
_, n = n.getEdge(search[0])
|
|
||||||
if n == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume the search prefix
|
|
||||||
if bytes.HasPrefix(search, n.prefix) {
|
|
||||||
search = search[len(n.prefix):]
|
|
||||||
|
|
||||||
} else if bytes.HasPrefix(n.prefix, search) {
|
|
||||||
// Child may be under our search prefix
|
|
||||||
recursiveWalk(n, fn)
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkPath is used to walk the tree, but only visiting nodes
|
|
||||||
// from the root down to a given leaf. Where WalkPrefix walks
|
|
||||||
// all the entries *under* the given prefix, this walks the
|
|
||||||
// entries *above* the given prefix.
|
|
||||||
func (n *Node) WalkPath(path []byte, fn WalkFn) {
|
|
||||||
search := path
|
|
||||||
for {
|
|
||||||
// Visit the leaf values if any
|
|
||||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for key exhaution
|
|
||||||
if len(search) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for an edge
|
|
||||||
_, n = n.getEdge(search[0])
|
|
||||||
if n == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume the search prefix
|
|
||||||
if bytes.HasPrefix(search, n.prefix) {
|
|
||||||
search = search[len(n.prefix):]
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// recursiveWalk is used to do a pre-order walk of a node
|
|
||||||
// recursively. Returns true if the walk should be aborted
|
|
||||||
func recursiveWalk(n *Node, fn WalkFn) bool {
|
|
||||||
// Visit the leaf values if any
|
|
||||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recurse on the children
|
|
||||||
for _, e := range n.edges {
|
|
||||||
if recursiveWalk(e.node, fn) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// reverseRecursiveWalk is used to do a reverse pre-order
|
|
||||||
// walk of a node recursively. Returns true if the walk
|
|
||||||
// should be aborted
|
|
||||||
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
|
|
||||||
// Visit the leaf values if any
|
|
||||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recurse on the children in reverse order
|
|
||||||
for i := len(n.edges) - 1; i >= 0; i-- {
|
|
||||||
e := n.edges[i]
|
|
||||||
if reverseRecursiveWalk(e.node, fn) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
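The Node methods above (Get, LongestPrefix, WalkPrefix, WalkPath) are the read-side API the iterators build on. A small hedged sketch, again assuming only the public iradix package API; the keys and values are made up for illustration:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("config/db/host"), "db1.internal")
	r, _, _ = r.Insert([]byte("config/db/port"), 5432)
	r, _, _ = r.Insert([]byte("config/web/host"), "web1.internal")

	// Exact lookup.
	if v, ok := r.Root().Get([]byte("config/db/port")); ok {
		fmt.Println("port:", v)
	}

	// Longest-prefix match: returns the config/db/host entry.
	k, v, _ := r.Root().LongestPrefix([]byte("config/db/host/ignored"))
	fmt.Printf("longest prefix %s=%v\n", k, v)

	// Walk everything under a prefix; returning false keeps walking.
	r.Root().WalkPrefix([]byte("config/db/"), func(k []byte, v interface{}) bool {
		fmt.Printf("%s=%v\n", k, v)
		return false
	})
}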
78 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go generated vendored
@@ -1,78 +0,0 @@
package iradix

// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
	// node is the starting node in the tree for the iterator.
	node *Node

	// stack keeps track of edges in the frontier.
	stack []rawStackEntry

	// pos is the current position of the iterator.
	pos *Node

	// path is the effective path of the current iterator position,
	// regardless of whether the current node is a leaf.
	path string
}

// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
	path  string
	edges edges
}

// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
	return i.pos
}

// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
	return i.path
}

// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
	// Initialize our stack if needed.
	if i.stack == nil && i.node != nil {
		i.stack = []rawStackEntry{
			{
				edges: edges{
					edge{node: i.node},
				},
			},
		}
	}

	for len(i.stack) > 0 {
		// Inspect the last element of the stack.
		n := len(i.stack)
		last := i.stack[n-1]
		elem := last.edges[0].node

		// Update the stack.
		if len(last.edges) > 1 {
			i.stack[n-1].edges = last.edges[1:]
		} else {
			i.stack = i.stack[:n-1]
		}

		// Push the edges onto the frontier.
		if len(elem.edges) > 0 {
			path := last.path + string(elem.prefix)
			i.stack = append(i.stack, rawStackEntry{path, elem.edges})
		}

		i.pos = elem
		i.path = last.path + string(elem.prefix)
		return
	}

	i.pos = nil
	i.path = ""
}
239 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go generated vendored
@@ -1,239 +0,0 @@
package iradix

import (
	"bytes"
)

// ReverseIterator is used to iterate over a set of nodes
// in reverse in-order
type ReverseIterator struct {
	i *Iterator

	// expandedParents stores the set of parent nodes whose relevant children have
	// already been pushed into the stack. This can happen during seek or during
	// iteration.
	//
	// Unlike forward iteration we need to recurse into children before we can
	// output the value stored in an internal leaf since all children are greater.
	// We use this to track whether we have already ensured all the children are
	// in the stack.
	expandedParents map[*Node]struct{}
}

// NewReverseIterator returns a new ReverseIterator at a node
func NewReverseIterator(n *Node) *ReverseIterator {
	return &ReverseIterator{
		i: &Iterator{node: n},
	}
}

// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
	return ri.i.SeekPrefixWatch(prefix)
}

// SeekPrefix is used to seek the iterator to a given prefix
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
	ri.i.SeekPrefixWatch(prefix)
}

// SeekReverseLowerBound is used to seek the iterator to the largest key that is
// lower or equal to the given key. There is no watch variant as it's hard to
// predict based on the radix structure which node(s) changes might affect the
// result.
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
	// go because we need only a subset of edges of many nodes in the path to the
	// leaf with the lower bound. Note that the iterator will still recurse into
	// children that we don't traverse on the way to the reverse lower bound as it
	// walks the stack.
	ri.i.stack = []edges{}
	// ri.i.node starts off in the common case as pointing to the root node of the
	// tree. By the time we return we have either found a lower bound and setup
	// the stack to traverse all larger keys, or we have not and the stack and
	// node should both be nil to prevent the iterator from assuming it is just
	// iterating the whole tree from the root node. Either way this needs to end
	// up as nil so just set it here.
	n := ri.i.node
	ri.i.node = nil
	search := key

	if ri.expandedParents == nil {
		ri.expandedParents = make(map[*Node]struct{})
	}

	found := func(n *Node) {
		ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
		// We need to mark this node as expanded in advance too otherwise the
		// iterator will attempt to walk all of its children even though they are
		// greater than the lower bound we have found. We've expanded it in the
		// sense that all of its children that we want to walk are already in the
		// stack (i.e. none of them).
		ri.expandedParents[n] = struct{}{}
	}

	for {
		// Compare current prefix with the search key's same-length prefix.
		var prefixCmp int
		if len(n.prefix) < len(search) {
			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
		} else {
			prefixCmp = bytes.Compare(n.prefix, search)
		}

		if prefixCmp < 0 {
			// Prefix is smaller than search prefix, that means there is no exact
			// match for the search key. But we are looking in reverse, so the reverse
			// lower bound will be the largest leaf under this subtree, since it is
			// the value that would come right before the current search key if it
			// were in the tree. So we need to follow the maximum path in this subtree
			// to find it. Note that this is exactly what the iterator will already do
			// if it finds a node in the stack that has _not_ been marked as expanded
			// so in this one case we don't call `found` and instead let the iterator
			// do the expansion and recursion through all the children.
			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
			return
		}

		if prefixCmp > 0 {
			// Prefix is larger than search prefix, or there is no prefix but we've
			// also exhausted the search key. Either way, that means there is no
			// reverse lower bound since nothing comes before our current search
			// prefix.
			return
		}

		// If this is a leaf, something needs to happen! Note that if it's a leaf
		// and prefixCmp was zero (which it must be to get here) then the leaf value
		// is either an exact match for the search, or it's lower. It can't be
		// greater.
		if n.isLeaf() {

			// Firstly, if it's an exact match, we're done!
			if bytes.Equal(n.leaf.key, key) {
				found(n)
				return
			}

			// It's not so this node's leaf value must be lower and could still be a
			// valid contender for reverse lower bound.

			// If it has no children then we are also done.
			if len(n.edges) == 0 {
				// This leaf is the lower bound.
				found(n)
				return
			}

			// Finally, this leaf is internal (has children) so we'll keep searching,
			// but we need to add it to the iterator's stack since it has a leaf value
			// that needs to be iterated over. It needs to be added to the stack
			// before its children below as it comes first.
			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
			// We also need to mark it as expanded since we'll be adding any of its
			// relevant children below and so don't want the iterator to re-add them
			// on its way back up the stack.
			ri.expandedParents[n] = struct{}{}
		}

		// Consume the search prefix. Note that this is safe because if n.prefix is
		// longer than the search slice prefixCmp would have been > 0 above and the
		// method would have already returned.
		search = search[len(n.prefix):]

		if len(search) == 0 {
			// We've exhausted the search key but we are not at a leaf. That means all
			// children are greater than the search key so a reverse lower bound
			// doesn't exist in this subtree. Note that there might still be one in
			// the whole radix tree by following a different path somewhere further
			// up. If that's the case then the iterator's stack will contain all the
			// smaller nodes already and Previous will walk through them correctly.
			return
		}

		// Otherwise, take the lower bound next edge.
		idx, lbNode := n.getLowerBoundEdge(search[0])

		// From here, we need to update the stack with all values lower than
		// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
		// search prefix is larger than all edges, we need to place idx at the
		// last edge index so they can all be placed in the stack, since they
		// come before our search prefix.
		if idx == -1 {
			idx = len(n.edges)
		}

		// Create stack edges for the all strictly lower edges in this node.
		if len(n.edges[:idx]) > 0 {
			ri.i.stack = append(ri.i.stack, n.edges[:idx])
		}

		// Exit if there's no lower bound edge. The stack will have the previous
		// nodes already.
		if lbNode == nil {
			return
		}

		// Recurse
		n = lbNode
	}
}

// Previous returns the previous node in reverse order
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
	// Initialize our stack if needed
	if ri.i.stack == nil && ri.i.node != nil {
		ri.i.stack = []edges{
			{
				edge{node: ri.i.node},
			},
		}
	}

	if ri.expandedParents == nil {
		ri.expandedParents = make(map[*Node]struct{})
	}

	for len(ri.i.stack) > 0 {
		// Inspect the last element of the stack
		n := len(ri.i.stack)
		last := ri.i.stack[n-1]
		m := len(last)
		elem := last[m-1].node

		_, alreadyExpanded := ri.expandedParents[elem]

		// If this is an internal node and we've not seen it already, we need to
		// leave it in the stack so we can return its possible leaf value _after_
		// we've recursed through all its children.
		if len(elem.edges) > 0 && !alreadyExpanded {
			// record that we've seen this node!
			ri.expandedParents[elem] = struct{}{}
			// push child edges onto stack and skip the rest of the loop to recurse
			// into the largest one.
			ri.i.stack = append(ri.i.stack, elem.edges)
			continue
		}

		// Remove the node from the stack
		if m > 1 {
			ri.i.stack[n-1] = last[:m-1]
		} else {
			ri.i.stack = ri.i.stack[:n-1]
		}
		// We don't need this state any more as it's no longer in the stack so we
		// won't visit it again
		if alreadyExpanded {
			delete(ri.expandedParents, elem)
		}

		// If this is a leaf, return it
		if elem.leaf != nil {
			return elem.leaf.key, elem.leaf.val, true
		}

		// it's not a leaf so keep walking the stack to find the previous leaf
	}
	return nil, nil, false
}
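SeekReverseLowerBound plus Previous is the mirror image of the forward lower-bound scan: it yields keys less than or equal to the seek key, in descending order. A hedged sketch using the same public iradix API as above, with illustrative keys:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("alpha"), 1)
	r, _, _ = r.Insert([]byte("beta"), 2)
	r, _, _ = r.Insert([]byte("gamma"), 3)

	// Find the largest key <= "beta", then walk backwards in key order.
	it := r.Root().ReverseIterator()
	it.SeekReverseLowerBound([]byte("beta"))
	for key, val, ok := it.Previous(); ok; key, val, ok = it.Previous() {
		fmt.Printf("%s=%v\n", key, val) // prints beta=2, then alpha=1
	}
}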
2 vendor/github.com/hashicorp/go-plugin/.gitignore generated vendored
@@ -1,2 +0,0 @@
.DS_Store
.idea
19 vendor/github.com/hashicorp/go-plugin/CHANGELOG.md generated vendored
@@ -1,19 +0,0 @@
## v1.4.5

ENHANCEMENTS:

* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)]


## v1.4.4

ENHANCEMENTS:

* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)]

BUG FIXES:

* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)]
* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)]
353 vendor/github.com/hashicorp/go-plugin/LICENSE (Mozilla Public License, version 2.0) generated vendored
File diff suppressed because it is too large
164 vendor/github.com/hashicorp/go-plugin/README.md generated vendored
@@ -1,164 +0,0 @@
# Go Plugin System over RPC

`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by [Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), [Vault](https://www.vaultproject.io), and [Boundary](https://www.boundaryproject.io).

While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. Plugins over a real network are not supported and will lead to unexpected behavior.

This plugin system has been used on millions of machines across many different projects and has proven to be battle hardened and ready for production use.

## Features

The HashiCorp plugin system supports a number of features:

**Plugins are Go interface implementations.** This makes writing and consuming plugins feel very natural. To a plugin author: you just implement an interface as if it were going to run in the same process. For a plugin user: you just use and call functions on an interface as if it were in the same process. This plugin system handles the communication in between.

**Cross-language support.** Plugins can be written (and consumed) by almost every major language. This library supports serving plugins via [gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written in any language.

**Complex arguments and return values are supported.** This library provides APIs for handling complex arguments and return values such as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library (`MuxBroker`) for creating new connections between the client/server to serve additional interfaces or transfer raw data.

**Bidirectional communication.** Because the plugin system supports complex arguments, the host process can send it interface implementations and the plugin can call back into the host process.

**Built-in Logging.** Any plugins that use the `log` standard library will have log data automatically sent to the host process. The host process will mirror this output prefixed with the path to the plugin binary. This makes debugging with plugins simple. If the host system uses [hclog](https://github.com/hashicorp/go-hclog) then the log data will be structured. If the plugin also uses hclog, logs from the plugin will be sent to the host hclog and be structured.

**Protocol Versioning.** A very basic "protocol version" is supported that can be incremented to invalidate any previous plugins. This is useful when interface signatures are changing, protocol level changes are necessary, etc. When a protocol version is incompatible, a human friendly error message is shown to the end user.

**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue to use stdout/stderr as usual and the output will get mirrored back to the host process. The host process can control what `io.Writer` these streams go to to prevent this from happening.

**TTY Preservation.** Plugin subprocesses are connected to the identical stdin file descriptor as the host process, allowing software that requires a TTY to work. For example, a plugin can execute `ssh` and even though there are multiple subprocesses and RPC happening, it will look and act perfectly to the end user.

**Host upgrade while a plugin is running.** Plugins can be "reattached" so that the host process can be upgraded while the plugin is still running. This requires the host/plugin to know this is possible and daemonize properly. `NewClient` takes a `ReattachConfig` to determine if and how to reattach.

**Cryptographically Secure Plugins.** Plugins can be verified with an expected checksum and RPC communications can be configured to use TLS. The host process must be properly secured to protect this configuration.

## Architecture

The HashiCorp plugin system works by launching subprocesses and communicating over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single connection is made between any plugin and the host process. For net/rpc-based plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) library to multiplex any other connections on top. For gRPC-based plugins, the HTTP2 protocol handles multiplexing.

This architecture has a number of benefits:

* Plugins can't crash your host process: A panic in a plugin doesn't panic the plugin user.

* Plugins are very easy to write: just write a Go application and `go build`. Or use any other language to write a gRPC server with a tiny amount of boilerplate to support go-plugin.

* Plugins are very easy to install: just put the binary in a location where the host will find it (depends on the host but this library also provides helpers), and the plugin host handles the rest.

* Plugins can be relatively secure: The plugin only has access to the interfaces and args given to it, not to the entire memory space of the process. Additionally, go-plugin can communicate with the plugin over TLS.

## Usage

To use the plugin system, you must take the following steps. These are high-level steps that must be done. Examples are available in the `examples/` directory.

1. Choose the interface(s) you want to expose for plugins.

2. For each interface, implement an implementation of that interface that communicates over a `net/rpc` connection or over a [gRPC](http://www.grpc.io) connection or both. You'll have to implement both a client and server implementation.

3. Create a `Plugin` implementation that knows how to create the RPC client/server for a given plugin type.

4. Plugin authors call `plugin.Serve` to serve a plugin from the `main` function.

5. Plugin users use `plugin.Client` to launch a subprocess and request an interface implementation over RPC.

That's it! In practice, step 2 is the most tedious and time consuming step. Even so, it isn't very difficult and you can see examples in the `examples/` directory as well as throughout our various open source projects.

For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).

## Roadmap

Our plugin system is constantly evolving. As we use the plugin system for new projects or for new features in existing projects, we constantly find improvements we can make.

At this point in time, the roadmap for the plugin system is:

**Semantic Versioning.** Plugins will be able to implement a semantic version. This plugin system will give host processes a system for constraining versions. This is in addition to the protocol versioning already present which is more for larger underlying changes.

## What About Shared Libraries?

When we started using plugins (late 2012, early 2013), plugins over RPC were the only option since Go didn't support dynamic library loading. Today, Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with a number of limitations. Since 2012, our plugin system has stabilized from tens of millions of users using it, and has many benefits we've come to value greatly.

For example, we use this plugin system in [Vault](https://www.vaultproject.io) where dynamic library loading is not acceptable for security reasons. That is an extreme example, but we believe our library system has more upsides than downsides over dynamic library loading and since we've had it built and tested for years, we'll continue to use it.

Shared libraries have one major advantage over our system which is much higher performance. In real world scenarios across our various tools, we've never required any more performance out of our plugin system and it has seen very high throughput, so this isn't a concern for us at the moment.
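On the host side, the Usage steps in the README above boil down to a handful of calls. A hedged, partial sketch of steps 4-5 for a plugin consumer; the handshake values, the "kv" plugin name, the binary path, and the KV/KVPlugin types are placeholders the reader is assumed to have defined in steps 1-3, not anything shipped by this repository, and the snippet will not compile without them:

package main

import (
	"log"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

// KV and KVPlugin are assumed to be defined elsewhere by the reader
// (steps 1-3 of the README); they are illustrative placeholders only.

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "BASIC_PLUGIN",
			MagicCookieValue: "hello",
		},
		Plugins: map[string]plugin.Plugin{"kv": &KVPlugin{}},
		Cmd:     exec.Command("./kv-plugin"),
	})
	defer client.Kill()

	// Connect over RPC/gRPC and dispense the named plugin.
	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}
	raw, err := rpcClient.Dispense("kv")
	if err != nil {
		log.Fatal(err)
	}
	kv := raw.(KV) // use the plugin through the ordinary Go interface
	_ = kv
}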
1055 vendor/github.com/hashicorp/go-plugin/client.go generated vendored
File diff suppressed because it is too large
28 vendor/github.com/hashicorp/go-plugin/discover.go generated vendored
@@ -1,28 +0,0 @@
package plugin

import (
	"path/filepath"
)

// Discover discovers plugins that are in a given directory.
//
// The directory doesn't need to be absolute. For example, "." will work fine.
//
// This currently assumes any file matching the glob is a plugin.
// In the future this may be smarter about checking that a file is
// executable and so on.
//
// TODO: test
func Discover(glob, dir string) ([]string, error) {
	var err error

	// Make the directory absolute if it isn't already
	if !filepath.IsAbs(dir) {
		dir, err = filepath.Abs(dir)
		if err != nil {
			return nil, err
		}
	}

	return filepath.Glob(filepath.Join(dir, glob))
}
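Discover is just a glob over a directory. A minimal hedged sketch of calling it, where the "*-plugin" pattern and the "./plugins" directory are illustrative choices, not conventions required by the library:

package main

import (
	"fmt"
	"log"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// Find candidate plugin binaries matching the glob under ./plugins.
	paths, err := plugin.Discover("*-plugin", "./plugins")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths {
		fmt.Println("found plugin binary:", p)
	}
}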
24 vendor/github.com/hashicorp/go-plugin/error.go generated vendored
@@ -1,24 +0,0 @@
package plugin

// This is a type that wraps error types so that they can be messaged
// across RPC channels. Since "error" is an interface, we can't always
// gob-encode the underlying structure. This is a valid error interface
// implementer that we will push across.
type BasicError struct {
	Message string
}

// NewBasicError is used to create a BasicError.
//
// err is allowed to be nil.
func NewBasicError(err error) *BasicError {
	if err == nil {
		return nil
	}

	return &BasicError{err.Error()}
}

func (e *BasicError) Error() string {
	return e.Message
}
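The point of BasicError is that a concrete struct with an exported Message field survives gob encoding, while an arbitrary error value may not. A small hedged illustration using only the standard library; the type is redeclared locally so the sketch is self-contained:

package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"log"
)

// BasicError mirrors the type above so the example compiles on its own.
type BasicError struct {
	Message string
}

func (e *BasicError) Error() string { return e.Message }

func main() {
	var buf bytes.Buffer
	wrapped := &BasicError{Message: errors.New("disk full").Error()}
	if err := gob.NewEncoder(&buf).Encode(wrapped); err != nil {
		log.Fatal(err)
	}

	var decoded BasicError
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Error()) // disk full
}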
457
vendor/github.com/hashicorp/go-plugin/grpc_broker.go
generated
vendored
457
vendor/github.com/hashicorp/go-plugin/grpc_broker.go
generated
vendored
@ -1,457 +0,0 @@
126 vendor/github.com/hashicorp/go-plugin/grpc_client.go generated vendored
@@ -1,126 +0,0 @@
23 vendor/github.com/hashicorp/go-plugin/grpc_controller.go generated vendored
@@ -1,23 +0,0 @@
package plugin

import (
	"context"

	"github.com/hashicorp/go-plugin/internal/plugin"
)

// GRPCControllerServer handles shutdown calls to terminate the server when the
// plugin client is closed.
type grpcControllerServer struct {
	server *GRPCServer
}

// Shutdown stops the grpc server. It first will attempt a graceful stop, then a
// full stop on the server.
func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) {
	resp := &plugin.Empty{}

	// TODO: figure out why GracefullStop doesn't work.
	s.server.Stop()
	return resp, nil
}
149 vendor/github.com/hashicorp/go-plugin/grpc_server.go generated vendored
@@ -1,149 +0,0 @@
207 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go generated vendored
@@ -1,207 +0,0 @@
3 vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go generated vendored
@@ -1,3 +0,0 @@
//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:.

package plugin
203 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go generated vendored
@@ -1,203 +0,0 @@
13 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto generated vendored
@@ -1,13 +0,0 @@
syntax = "proto3";
package plugin;
option go_package = "plugin";

message ConnInfo {
  uint32 service_id = 1;
  string network = 2;
  string address = 3;
}

service GRPCBroker {
  rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
}
145 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go generated vendored
@@ -1,145 +0,0 @@
11 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto generated vendored
@@ -1,11 +0,0 @@
syntax = "proto3";
package plugin;
option go_package = "plugin";

message Empty {
}

// The GRPCController is responsible for telling the plugin server to shutdown.
service GRPCController {
  rpc Shutdown(Empty) returns (Empty);
}
233 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go generated vendored
@@ -1,233 +0,0 @@
30  vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto  (generated, vendored)
@@ -1,30 +0,0 @@
syntax = "proto3";
package plugin;
option go_package = "plugin";

import "google/protobuf/empty.proto";

// GRPCStdio is a service that is automatically run by the plugin process
// to stream any stdout/err data so that it can be mirrored on the plugin
// host side.
service GRPCStdio {
  // StreamStdio returns a stream that contains all the stdout/stderr.
  // This RPC endpoint must only be called ONCE. Once stdio data is consumed
  // it is not sent again.
  //
  // Callers should connect early to prevent blocking on the plugin process.
  rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData);
}

// StdioData is a single chunk of stdout or stderr data that is streamed
// from GRPCStdio.
message StdioData {
  enum Channel {
    INVALID = 0;
    STDOUT = 1;
    STDERR = 2;
  }

  Channel channel = 1;
  bytes data = 2;
}
73  vendor/github.com/hashicorp/go-plugin/log_entry.go  (generated, vendored)
@@ -1,73 +0,0 @@
package plugin

import (
	"encoding/json"
	"time"
)

// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
type logEntry struct {
	Message   string        `json:"@message"`
	Level     string        `json:"@level"`
	Timestamp time.Time     `json:"timestamp"`
	KVPairs   []*logEntryKV `json:"kv_pairs"`
}

// logEntryKV is a key value pair within the Output payload
type logEntryKV struct {
	Key   string      `json:"key"`
	Value interface{} `json:"value"`
}

// flattenKVPairs is used to flatten KVPair slice into []interface{}
// for hclog consumption.
func flattenKVPairs(kvs []*logEntryKV) []interface{} {
	var result []interface{}
	for _, kv := range kvs {
		result = append(result, kv.Key)
		result = append(result, kv.Value)
	}

	return result
}

// parseJSON handles parsing JSON output
func parseJSON(input []byte) (*logEntry, error) {
	var raw map[string]interface{}
	entry := &logEntry{}

	err := json.Unmarshal(input, &raw)
	if err != nil {
		return nil, err
	}

	// Parse hclog-specific objects
	if v, ok := raw["@message"]; ok {
		entry.Message = v.(string)
		delete(raw, "@message")
	}

	if v, ok := raw["@level"]; ok {
		entry.Level = v.(string)
		delete(raw, "@level")
	}

	if v, ok := raw["@timestamp"]; ok {
		t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
		if err != nil {
			return nil, err
		}
		entry.Timestamp = t
		delete(raw, "@timestamp")
	}

	// Parse dynamic KV args from the hclog payload.
	for k, v := range raw {
		entry.KVPairs = append(entry.KVPairs, &logEntryKV{
			Key:   k,
			Value: v,
		})
	}

	return entry, nil
}
73  vendor/github.com/hashicorp/go-plugin/mtls.go  (generated, vendored)
@@ -1,73 +0,0 @@
package plugin

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"
)

// generateCert generates a temporary certificate for plugin authentication. The
// certificate and private key are returns in PEM format.
func generateCert() (cert []byte, privateKey []byte, err error) {
	key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}

	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	sn, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, nil, err
	}

	host := "localhost"

	template := &x509.Certificate{
		Subject: pkix.Name{
			CommonName:   host,
			Organization: []string{"HashiCorp"},
		},
		DNSNames: []string{host},
		ExtKeyUsage: []x509.ExtKeyUsage{
			x509.ExtKeyUsageClientAuth,
			x509.ExtKeyUsageServerAuth,
		},
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		SerialNumber:          sn,
		NotBefore:             time.Now().Add(-30 * time.Second),
		NotAfter:              time.Now().Add(262980 * time.Hour),
		IsCA:                  true,
	}

	der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
	if err != nil {
		return nil, nil, err
	}

	var certOut bytes.Buffer
	if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		return nil, nil, err
	}

	keyBytes, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return nil, nil, err
	}

	var keyOut bytes.Buffer
	if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
		return nil, nil, err
	}

	cert = certOut.Bytes()
	privateKey = keyOut.Bytes()

	return cert, privateKey, nil
}
204  vendor/github.com/hashicorp/go-plugin/mux_broker.go  (generated, vendored)
@@ -1,204 +0,0 @@
package plugin

import (
	"encoding/binary"
	"fmt"
	"log"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hashicorp/yamux"
)

// MuxBroker is responsible for brokering multiplexed connections by unique ID.
//
// It is used by plugins to multiplex multiple RPC connections and data
// streams on top of a single connection between the plugin process and the
// host process.
//
// This allows a plugin to request a channel with a specific ID to connect to
// or accept a connection from, and the broker handles the details of
// holding these channels open while they're being negotiated.
//
// The Plugin interface has access to these for both Server and Client.
// The broker can be used by either (optionally) to reserve and connect to
// new multiplexed streams. This is useful for complex args and return values,
// or anything else you might need a data stream for.
type MuxBroker struct {
	nextId  uint32
	session *yamux.Session
	streams map[uint32]*muxBrokerPending

	sync.Mutex
}

type muxBrokerPending struct {
	ch     chan net.Conn
	doneCh chan struct{}
}

func newMuxBroker(s *yamux.Session) *MuxBroker {
	return &MuxBroker{
		session: s,
		streams: make(map[uint32]*muxBrokerPending),
	}
}

// Accept accepts a connection by ID.
//
// This should not be called multiple times with the same ID at one time.
func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
	var c net.Conn
	p := m.getStream(id)
	select {
	case c = <-p.ch:
		close(p.doneCh)
	case <-time.After(5 * time.Second):
		m.Lock()
		defer m.Unlock()
		delete(m.streams, id)

		return nil, fmt.Errorf("timeout waiting for accept")
	}

	// Ack our connection
	if err := binary.Write(c, binary.LittleEndian, id); err != nil {
		c.Close()
		return nil, err
	}

	return c, nil
}

// AcceptAndServe is used to accept a specific stream ID and immediately
// serve an RPC server on that stream ID. This is used to easily serve
// complex arguments.
//
// The served interface is always registered to the "Plugin" name.
func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
	conn, err := m.Accept(id)
	if err != nil {
		log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
		return
	}

	serve(conn, "Plugin", v)
}

// Close closes the connection and all sub-connections.
func (m *MuxBroker) Close() error {
	return m.session.Close()
}

// Dial opens a connection by ID.
func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
	// Open the stream
	stream, err := m.session.OpenStream()
	if err != nil {
		return nil, err
	}

	// Write the stream ID onto the wire.
	if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
		stream.Close()
		return nil, err
	}

	// Read the ack that we connected. Then we're off!
	var ack uint32
	if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
		stream.Close()
		return nil, err
	}
	if ack != id {
		stream.Close()
		return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
	}

	return stream, nil
}

// NextId returns a unique ID to use next.
//
// It is possible for very long-running plugin hosts to wrap this value,
// though it would require a very large amount of RPC calls. In practice
// we've never seen it happen.
func (m *MuxBroker) NextId() uint32 {
	return atomic.AddUint32(&m.nextId, 1)
}

// Run starts the brokering and should be executed in a goroutine, since it
// blocks forever, or until the session closes.
//
// Uses of MuxBroker never need to call this. It is called internally by
// the plugin host/client.
func (m *MuxBroker) Run() {
	for {
		stream, err := m.session.AcceptStream()
		if err != nil {
			// Once we receive an error, just exit
			break
		}

		// Read the stream ID from the stream
		var id uint32
		if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
			stream.Close()
			continue
		}

		// Initialize the waiter
		p := m.getStream(id)
		select {
		case p.ch <- stream:
		default:
		}

		// Wait for a timeout
		go m.timeoutWait(id, p)
	}
}

func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
	m.Lock()
	defer m.Unlock()

	p, ok := m.streams[id]
	if ok {
		return p
	}

	m.streams[id] = &muxBrokerPending{
		ch:     make(chan net.Conn, 1),
		doneCh: make(chan struct{}),
	}
	return m.streams[id]
}

func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
	// Wait for the stream to either be picked up and connected, or
	// for a timeout.
	timeout := false
	select {
	case <-p.doneCh:
	case <-time.After(5 * time.Second):
		timeout = true
	}

	m.Lock()
	defer m.Unlock()

	// Delete the stream so no one else can grab it
	delete(m.streams, id)

	// If we timed out, then check if we have a channel in the buffer,
	// and if so, close it.
	if timeout {
		select {
		case s := <-p.ch:
			s.Close()
		}
	}
}
58  vendor/github.com/hashicorp/go-plugin/plugin.go  (generated, vendored)
@@ -1,58 +0,0 @@
// The plugin package exposes functions and helpers for communicating to
// plugins which are implemented as standalone binary applications.
//
// plugin.Client fully manages the lifecycle of executing the application,
// connecting to it, and returning the RPC client for dispensing plugins.
//
// plugin.Serve fully manages listeners to expose an RPC server from a binary
// that plugin.Client can connect to.
package plugin

import (
	"context"
	"errors"
	"net/rpc"

	"google.golang.org/grpc"
)

// Plugin is the interface that is implemented to serve/connect to an
// inteface implementation.
type Plugin interface {
	// Server should return the RPC server compatible struct to serve
	// the methods that the Client calls over net/rpc.
	Server(*MuxBroker) (interface{}, error)

	// Client returns an interface implementation for the plugin you're
	// serving that communicates to the server end of the plugin.
	Client(*MuxBroker, *rpc.Client) (interface{}, error)
}

// GRPCPlugin is the interface that is implemented to serve/connect to
// a plugin over gRPC.
type GRPCPlugin interface {
	// GRPCServer should register this plugin for serving with the
	// given GRPCServer. Unlike Plugin.Server, this is only called once
	// since gRPC plugins serve singletons.
	GRPCServer(*GRPCBroker, *grpc.Server) error

	// GRPCClient should return the interface implementation for the plugin
	// you're serving via gRPC. The provided context will be canceled by
	// go-plugin in the event of the plugin process exiting.
	GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
}

// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
// Server and Client functions. This will effectively disable support for
// net/rpc based plugins.
//
// This struct can be embedded in your struct.
type NetRPCUnsupportedPlugin struct{}

func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
	return nil, errors.New("net/rpc plugin protocol not supported")
}

func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
	return nil, errors.New("net/rpc plugin protocol not supported")
}
24  vendor/github.com/hashicorp/go-plugin/process.go  (generated, vendored)
@@ -1,24 +0,0 @@
package plugin

import (
	"time"
)

// pidAlive checks whether a pid is alive.
func pidAlive(pid int) bool {
	return _pidAlive(pid)
}

// pidWait blocks for a process to exit.
func pidWait(pid int) error {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for range ticker.C {
		if !pidAlive(pid) {
			break
		}
	}

	return nil
}
20  vendor/github.com/hashicorp/go-plugin/process_posix.go  (generated, vendored)
@@ -1,20 +0,0 @@
//go:build !windows
// +build !windows

package plugin

import (
	"os"
	"syscall"
)

// _pidAlive tests whether a process is alive or not by sending it Signal 0,
// since Go otherwise has no way to test this.
func _pidAlive(pid int) bool {
	proc, err := os.FindProcess(pid)
	if err == nil {
		err = proc.Signal(syscall.Signal(0))
	}

	return err == nil
}
30  vendor/github.com/hashicorp/go-plugin/process_windows.go  (generated, vendored)
@@ -1,30 +0,0 @@
package plugin

import (
	"syscall"
)

const (
	// Weird name but matches the MSDN docs
	exit_STILL_ACTIVE = 259

	processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
		syscall.PROCESS_QUERY_INFORMATION |
		syscall.SYNCHRONIZE
)

// _pidAlive tests whether a process is alive or not
func _pidAlive(pid int) bool {
	h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
	if err != nil {
		return false
	}
	defer syscall.CloseHandle(h)

	var ec uint32
	if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
		return false
	}

	return ec == exit_STILL_ACTIVE
}
45  vendor/github.com/hashicorp/go-plugin/protocol.go  (generated, vendored)
@@ -1,45 +0,0 @@
package plugin

import (
	"io"
	"net"
)

// Protocol is an enum representing the types of protocols.
type Protocol string

const (
	ProtocolInvalid Protocol = ""
	ProtocolNetRPC  Protocol = "netrpc"
	ProtocolGRPC    Protocol = "grpc"
)

// ServerProtocol is an interface that must be implemented for new plugin
// protocols to be servers.
type ServerProtocol interface {
	// Init is called once to configure and initialize the protocol, but
	// not start listening. This is the point at which all validation should
	// be done and errors returned.
	Init() error

	// Config is extra configuration to be outputted to stdout. This will
	// be automatically base64 encoded to ensure it can be parsed properly.
	// This can be an empty string if additional configuration is not needed.
	Config() string

	// Serve is called to serve connections on the given listener. This should
	// continue until the listener is closed.
	Serve(net.Listener)
}

// ClientProtocol is an interface that must be implemented for new plugin
// protocols to be clients.
type ClientProtocol interface {
	io.Closer

	// Dispense dispenses a new instance of the plugin with the given name.
	Dispense(string) (interface{}, error)

	// Ping checks that the client connection is still healthy.
	Ping() error
}
170  vendor/github.com/hashicorp/go-plugin/rpc_client.go  (generated, vendored)
@@ -1,170 +0,0 @@
package plugin

import (
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/rpc"

	"github.com/hashicorp/yamux"
)

// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
type RPCClient struct {
	broker  *MuxBroker
	control *rpc.Client
	plugins map[string]Plugin

	// These are the streams used for the various stdout/err overrides
	stdout, stderr net.Conn
}

// newRPCClient creates a new RPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newRPCClient(c *Client) (*RPCClient, error) {
	// Connect to the client
	conn, err := net.Dial(c.address.Network(), c.address.String())
	if err != nil {
		return nil, err
	}
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		// Make sure to set keep alive so that the connection doesn't die
		tcpConn.SetKeepAlive(true)
	}

	if c.config.TLSConfig != nil {
		conn = tls.Client(conn, c.config.TLSConfig)
	}

	// Create the actual RPC client
	result, err := NewRPCClient(conn, c.config.Plugins)
	if err != nil {
		conn.Close()
		return nil, err
	}

	// Begin the stream syncing so that stdin, out, err work properly
	err = result.SyncStreams(
		c.config.SyncStdout,
		c.config.SyncStderr)
	if err != nil {
		result.Close()
		return nil, err
	}

	return result, nil
}

// NewRPCClient creates a client from an already-open connection-like value.
// Dial is typically used instead.
func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
	// Create the yamux client so we can multiplex
	mux, err := yamux.Client(conn, nil)
	if err != nil {
		conn.Close()
		return nil, err
	}

	// Connect to the control stream.
	control, err := mux.Open()
	if err != nil {
		mux.Close()
		return nil, err
	}

	// Connect stdout, stderr streams
	stdstream := make([]net.Conn, 2)
	for i, _ := range stdstream {
		stdstream[i], err = mux.Open()
		if err != nil {
			mux.Close()
			return nil, err
		}
	}

	// Create the broker and start it up
	broker := newMuxBroker(mux)
	go broker.Run()

	// Build the client using our broker and control channel.
	return &RPCClient{
		broker:  broker,
		control: rpc.NewClient(control),
		plugins: plugins,
		stdout:  stdstream[0],
		stderr:  stdstream[1],
	}, nil
}

// SyncStreams should be called to enable syncing of stdout,
// stderr with the plugin.
//
// This will return immediately and the syncing will continue to happen
// in the background. You do not need to launch this in a goroutine itself.
//
// This should never be called multiple times.
func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
	go copyStream("stdout", stdout, c.stdout)
	go copyStream("stderr", stderr, c.stderr)
	return nil
}

// Close closes the connection. The client is no longer usable after this
// is called.
func (c *RPCClient) Close() error {
	// Call the control channel and ask it to gracefully exit. If this
	// errors, then we save it so that we always return an error but we
	// want to try to close the other channels anyways.
	var empty struct{}
	returnErr := c.control.Call("Control.Quit", true, &empty)

	// Close the other streams we have
	if err := c.control.Close(); err != nil {
		return err
	}
	if err := c.stdout.Close(); err != nil {
		return err
	}
	if err := c.stderr.Close(); err != nil {
		return err
	}
	if err := c.broker.Close(); err != nil {
		return err
	}

	// Return back the error we got from Control.Quit. This is very important
	// since we MUST return non-nil error if this fails so that Client.Kill
	// will properly try a process.Kill.
	return returnErr
}

func (c *RPCClient) Dispense(name string) (interface{}, error) {
	p, ok := c.plugins[name]
	if !ok {
		return nil, fmt.Errorf("unknown plugin type: %s", name)
	}

	var id uint32
	if err := c.control.Call(
		"Dispenser.Dispense", name, &id); err != nil {
		return nil, err
	}

	conn, err := c.broker.Dial(id)
	if err != nil {
		return nil, err
	}

	return p.Client(c.broker, rpc.NewClient(conn))
}

// Ping pings the connection to ensure it is still alive.
//
// The error from the RPC call is returned exactly if you want to inspect
// it for further error analysis. Any error returned from here would indicate
// that the connection to the plugin is not healthy.
func (c *RPCClient) Ping() error {
	var empty struct{}
	return c.control.Call("Control.Ping", true, &empty)
}
201  vendor/github.com/hashicorp/go-plugin/rpc_server.go  (generated, vendored)
@@ -1,201 +0,0 @@
package plugin

import (
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"net/rpc"
	"sync"

	"github.com/hashicorp/yamux"
)

// RPCServer listens for network connections and then dispenses interface
// implementations over net/rpc.
//
// After setting the fields below, they shouldn't be read again directly
// from the structure which may be reading/writing them concurrently.
type RPCServer struct {
	Plugins map[string]Plugin

	// Stdout, Stderr are what this server will use instead of the
	// normal stdin/out/err. This is because due to the multi-process nature
	// of our plugin system, we can't use the normal process values so we
	// make our own custom one we pipe across.
	Stdout io.Reader
	Stderr io.Reader

	// DoneCh should be set to a non-nil channel that will be closed
	// when the control requests the RPC server to end.
	DoneCh chan<- struct{}

	lock sync.Mutex
}

// ServerProtocol impl.
func (s *RPCServer) Init() error { return nil }

// ServerProtocol impl.
func (s *RPCServer) Config() string { return "" }

// ServerProtocol impl.
func (s *RPCServer) Serve(lis net.Listener) {
	for {
		conn, err := lis.Accept()
		if err != nil {
			severity := "ERR"
			if errors.Is(err, net.ErrClosed) {
				severity = "DEBUG"
			}
			log.Printf("[%s] plugin: plugin server: %s", severity, err)
			return
		}

		go s.ServeConn(conn)
	}
}

// ServeConn runs a single connection.
//
// ServeConn blocks, serving the connection until the client hangs up.
func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
	// First create the yamux server to wrap this connection
	mux, err := yamux.Server(conn, nil)
	if err != nil {
		conn.Close()
		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
		return
	}

	// Accept the control connection
	control, err := mux.Accept()
	if err != nil {
		mux.Close()
		if err != io.EOF {
			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
		}

		return
	}

	// Connect the stdstreams (in, out, err)
	stdstream := make([]net.Conn, 2)
	for i, _ := range stdstream {
		stdstream[i], err = mux.Accept()
		if err != nil {
			mux.Close()
			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
			return
		}
	}

	// Copy std streams out to the proper place
	go copyStream("stdout", stdstream[0], s.Stdout)
	go copyStream("stderr", stdstream[1], s.Stderr)

	// Create the broker and start it up
	broker := newMuxBroker(mux)
	go broker.Run()

	// Use the control connection to build the dispenser and serve the
	// connection.
	server := rpc.NewServer()
	server.RegisterName("Control", &controlServer{
		server: s,
	})
	server.RegisterName("Dispenser", &dispenseServer{
		broker:  broker,
		plugins: s.Plugins,
	})
	server.ServeConn(control)
}

// done is called internally by the control server to trigger the
// doneCh to close which is listened to by the main process to cleanly
// exit.
func (s *RPCServer) done() {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.DoneCh != nil {
		close(s.DoneCh)
		s.DoneCh = nil
	}
}

// dispenseServer dispenses variousinterface implementations for Terraform.
type controlServer struct {
	server *RPCServer
}

// Ping can be called to verify the connection (and likely the binary)
// is still alive to a plugin.
func (c *controlServer) Ping(
	null bool, response *struct{}) error {
	*response = struct{}{}
	return nil
}

func (c *controlServer) Quit(
	null bool, response *struct{}) error {
	// End the server
	c.server.done()

	// Always return true
	*response = struct{}{}

	return nil
}

// dispenseServer dispenses variousinterface implementations for Terraform.
type dispenseServer struct {
	broker  *MuxBroker
	plugins map[string]Plugin
}

func (d *dispenseServer) Dispense(
	name string, response *uint32) error {
	// Find the function to create this implementation
	p, ok := d.plugins[name]
	if !ok {
		return fmt.Errorf("unknown plugin type: %s", name)
	}

	// Create the implementation first so we know if there is an error.
	impl, err := p.Server(d.broker)
	if err != nil {
		// We turn the error into an errors error so that it works across RPC
		return errors.New(err.Error())
	}

	// Reserve an ID for our implementation
	id := d.broker.NextId()
	*response = id

	// Run the rest in a goroutine since it can only happen once this RPC
	// call returns. We wait for a connection for the plugin implementation
	// and serve it.
	go func() {
		conn, err := d.broker.Accept(id)
		if err != nil {
			log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
			return
		}

		serve(conn, "Plugin", impl)
	}()

	return nil
}

func serve(conn io.ReadWriteCloser, name string, v interface{}) {
	server := rpc.NewServer()
	if err := server.RegisterName(name, v); err != nil {
		log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
		return
	}

	server.ServeConn(conn)
}
591
vendor/github.com/hashicorp/go-plugin/server.go
generated
vendored
591
vendor/github.com/hashicorp/go-plugin/server.go
generated
vendored
@ -1,591 +0,0 @@
|
|||||||
package plugin
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
hclog "github.com/hashicorp/go-hclog"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
|
|
||||||
// We will increment this whenever we change any protocol behavior. This
|
|
||||||
// will invalidate any prior plugins but will at least allow us to iterate
|
|
||||||
// on the core in a safe way. We will do our best to do this very
|
|
||||||
// infrequently.
|
|
||||||
const CoreProtocolVersion = 1
|
|
||||||
|
|
||||||
// HandshakeConfig is the configuration used by client and servers to
|
|
||||||
// handshake before starting a plugin connection. This is embedded by
|
|
||||||
// both ServeConfig and ClientConfig.
|
|
||||||
//
|
|
||||||
// In practice, the plugin host creates a HandshakeConfig that is exported
|
|
||||||
// and plugins then can easily consume it.
|
|
||||||
type HandshakeConfig struct {
|
|
||||||
// ProtocolVersion is the version that clients must match on to
|
|
||||||
// agree they can communicate. This should match the ProtocolVersion
|
|
||||||
// set on ClientConfig when using a plugin.
|
|
||||||
// This field is not required if VersionedPlugins are being used in the
|
|
||||||
// Client or Server configurations.
|
|
||||||
ProtocolVersion uint
|
|
||||||
|
|
||||||
// MagicCookieKey and value are used as a very basic verification
|
|
||||||
// that a plugin is intended to be launched. This is not a security
|
|
||||||
// measure, just a UX feature. If the magic cookie doesn't match,
|
|
||||||
// we show human-friendly output.
|
|
||||||
MagicCookieKey string
|
|
||||||
MagicCookieValue string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginSet is a set of plugins provided to be registered in the plugin
|
|
||||||
// server.
|
|
||||||
type PluginSet map[string]Plugin
|
|
||||||
|
|
||||||
// ServeConfig configures what sorts of plugins are served.
|
|
||||||
type ServeConfig struct {
|
|
||||||
// HandshakeConfig is the configuration that must match clients.
|
|
||||||
HandshakeConfig
|
|
||||||
|
|
||||||
// TLSProvider is a function that returns a configured tls.Config.
|
|
||||||
TLSProvider func() (*tls.Config, error)
|
|
||||||
|
|
||||||
// Plugins are the plugins that are served.
|
|
||||||
// The implied version of this PluginSet is the Handshake.ProtocolVersion.
|
|
||||||
Plugins PluginSet
|
|
||||||
|
|
||||||
// VersionedPlugins is a map of PluginSets for specific protocol versions.
|
|
||||||
// These can be used to negotiate a compatible version between client and
|
|
||||||
// server. If this is set, Handshake.ProtocolVersion is not required.
|
|
||||||
VersionedPlugins map[int]PluginSet
|
|
||||||
|
|
||||||
// GRPCServer should be non-nil to enable serving the plugins over
|
|
||||||
// gRPC. This is a function to create the server when needed with the
|
|
||||||
// given server options. The server options populated by go-plugin will
|
|
||||||
// be for TLS if set. You may modify the input slice.
|
|
||||||
//
|
|
||||||
// Note that the grpc.Server will automatically be registered with
|
|
||||||
// the gRPC health checking service. This is not optional since go-plugin
|
|
||||||
// relies on this to implement Ping().
|
|
||||||
GRPCServer func([]grpc.ServerOption) *grpc.Server
|
|
||||||
|
|
||||||
// Logger is used to pass a logger into the server. If none is provided the
|
|
||||||
// server will create a default logger.
|
|
||||||
Logger hclog.Logger
|
|
||||||
|
|
||||||
// Test, if non-nil, will put plugin serving into "test mode". This is
|
|
||||||
// meant to be used as part of `go test` within a plugin's codebase to
|
|
||||||
// launch the plugin in-process and output a ReattachConfig.
|
|
||||||
//
|
|
||||||
// This changes the behavior of the server in a number of ways to
|
|
||||||
// accomodate the expectation of running in-process:
|
|
||||||
//
|
|
||||||
// * The handshake cookie is not validated.
|
|
||||||
// * Stdout/stderr will receive plugin reads and writes
|
|
||||||
// * Connection information will not be sent to stdout
|
|
||||||
//
|
|
||||||
Test *ServeTestConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test.
|
|
||||||
type ServeTestConfig struct {
|
|
||||||
// Context, if set, will force the plugin serving to end when cancelled.
|
|
||||||
// This is only a test configuration because the non-test configuration
|
|
||||||
// expects to take over the process and therefore end on an interrupt or
|
|
||||||
// kill signal. For tests, we need to kill the plugin serving routinely
|
|
||||||
// and this provides a way to do so.
|
|
||||||
//
|
|
||||||
// If you want to wait for the plugin process to close before moving on,
|
|
||||||
// you can wait on CloseCh.
|
|
||||||
Context context.Context
|
|
||||||
|
|
||||||
// If this channel is non-nil, we will send the ReattachConfig via
|
|
||||||
// this channel. This can be encoded (via JSON recommended) to the
|
|
||||||
// plugin client to attach to this plugin.
|
|
||||||
ReattachConfigCh chan<- *ReattachConfig
|
|
||||||
|
|
||||||
// CloseCh, if non-nil, will be closed when serving exits. This can be
|
|
||||||
// used along with Context to determine when the server is fully shut down.
|
|
||||||
// If this is not set, you can still use Context on its own, but note there
|
|
||||||
// may be a period of time between canceling the context and the plugin
|
|
||||||
// server being shut down.
|
|
||||||
CloseCh chan<- struct{}
|
|
||||||
|
|
||||||
// SyncStdio, if true, will enable the client side "SyncStdout/Stderr"
|
|
||||||
// functionality to work. This defaults to false because the implementation
|
|
||||||
// of making this work within test environments is particularly messy
|
|
||||||
// and SyncStdio functionality is fairly rare, so we default to the simple
|
|
||||||
// scenario.
|
|
||||||
SyncStdio bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// protocolVersion determines the protocol version and plugin set to be used by
|
|
||||||
// the server. In the event that there is no suitable version, the last version
|
|
||||||
// in the config is returned leaving the client to report the incompatibility.
|
|
||||||
func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
|
|
||||||
protoVersion := int(opts.ProtocolVersion)
|
|
||||||
pluginSet := opts.Plugins
|
|
||||||
protoType := ProtocolNetRPC
|
|
||||||
// Check if the client sent a list of acceptable versions
|
|
||||||
var clientVersions []int
|
|
||||||
if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" {
|
|
||||||
for _, s := range strings.Split(vs, ",") {
|
|
||||||
v, err := strconv.Atoi(s)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
clientVersions = append(clientVersions, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We want to iterate in reverse order, to ensure we match the newest
|
|
||||||
// compatible plugin version.
|
|
||||||
sort.Sort(sort.Reverse(sort.IntSlice(clientVersions)))
|
|
||||||
|
|
||||||
// set the old un-versioned fields as if they were versioned plugins
|
|
||||||
if opts.VersionedPlugins == nil {
|
|
||||||
opts.VersionedPlugins = make(map[int]PluginSet)
|
|
||||||
}
|
|
||||||
|
|
||||||
if pluginSet != nil {
|
|
||||||
opts.VersionedPlugins[protoVersion] = pluginSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort the version to make sure we match the latest first
|
|
||||||
var versions []int
|
|
||||||
for v := range opts.VersionedPlugins {
|
|
||||||
versions = append(versions, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(sort.Reverse(sort.IntSlice(versions)))
|
|
||||||
|
|
||||||
// See if we have multiple versions of Plugins to choose from
|
|
||||||
for _, version := range versions {
|
|
||||||
// Record each version, since we guarantee that this returns valid
|
|
||||||
// values even if they are not a protocol match.
|
|
||||||
protoVersion = version
|
|
||||||
pluginSet = opts.VersionedPlugins[version]
|
|
||||||
|
|
||||||
// If we have a configured gRPC server we should select a protocol
|
|
||||||
if opts.GRPCServer != nil {
|
|
||||||
// All plugins in a set must use the same transport, so check the first
|
|
||||||
// for the protocol type
|
|
||||||
for _, p := range pluginSet {
|
|
||||||
switch p.(type) {
|
|
||||||
case GRPCPlugin:
|
|
||||||
protoType = ProtocolGRPC
|
|
||||||
default:
|
|
||||||
protoType = ProtocolNetRPC
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, clientVersion := range clientVersions {
|
|
||||||
if clientVersion == protoVersion {
|
|
||||||
return protoVersion, protoType, pluginSet
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the lowest version as the fallback.
|
|
||||||
// Since we iterated over all the versions in reverse order above, these
|
|
||||||
// values are from the lowest version number plugins (which may be from
|
|
||||||
// a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins
|
|
||||||
// fields). This allows serving the oldest version of our plugins to a
|
|
||||||
// legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list.
|
|
||||||
return protoVersion, protoType, pluginSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serve serves the plugins given by ServeConfig.
|
|
||||||
//
|
|
||||||
// Serve doesn't return until the plugin is done being executed. Any
|
|
||||||
// fixable errors will be output to os.Stderr and the process will
|
|
||||||
// exit with a status code of 1. Serve will panic for unexpected
|
|
||||||
// conditions where a user's fix is unknown.
|
|
||||||
//
|
|
||||||
// This is the method that plugins should call in their main() functions.
|
|
||||||
func Serve(opts *ServeConfig) {
|
|
||||||
exitCode := -1
|
|
||||||
// We use this to trigger an `os.Exit` so that we can execute our other
|
|
||||||
// deferred functions. In test mode, we just output the err to stderr
|
|
||||||
// and return.
|
|
||||||
defer func() {
|
|
||||||
if opts.Test == nil && exitCode >= 0 {
|
|
||||||
os.Exit(exitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Test != nil && opts.Test.CloseCh != nil {
|
|
||||||
close(opts.Test.CloseCh)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if opts.Test == nil {
|
|
||||||
// Validate the handshake config
|
|
||||||
if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
|
|
||||||
fmt.Fprintf(os.Stderr,
|
|
||||||
"Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
|
|
||||||
"key or value was set. Please notify the plugin author and report\n"+
|
|
||||||
"this as a bug.\n")
|
|
||||||
exitCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// First check the cookie
|
|
||||||
if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
|
|
||||||
fmt.Fprintf(os.Stderr,
|
|
||||||
"This binary is a plugin. These are not meant to be executed directly.\n"+
|
|
||||||
"Please execute the program that consumes these plugins, which will\n"+
|
|
||||||
"load any plugins automatically\n")
|
|
||||||
exitCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// negotiate the version and plugins
|
|
||||||
// start with default version in the handshake config
|
|
||||||
protoVersion, protoType, pluginSet := protocolVersion(opts)
|
|
||||||
|
|
||||||
logger := opts.Logger
|
|
||||||
if logger == nil {
|
|
||||||
// internal logger to os.Stderr
|
|
||||||
logger = hclog.New(&hclog.LoggerOptions{
|
|
||||||
Level: hclog.Trace,
|
|
||||||
Output: os.Stderr,
|
|
||||||
JSONFormat: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register a listener so we can accept a connection
|
|
||||||
listener, err := serverListener()
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("plugin init error", "error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the listener on return. We wrap this in a func() on purpose
|
|
||||||
// because the "listener" reference may change to TLS.
|
|
||||||
defer func() {
|
|
||||||
listener.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
var tlsConfig *tls.Config
|
|
||||||
if opts.TLSProvider != nil {
|
|
||||||
tlsConfig, err = opts.TLSProvider()
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("plugin tls init", "error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var serverCert string
|
|
||||||
clientCert := os.Getenv("PLUGIN_CLIENT_CERT")
|
|
||||||
// If the client is configured using AutoMTLS, the certificate will be here,
|
|
||||||
// and we need to generate our own in response.
|
|
||||||
if tlsConfig == nil && clientCert != "" {
|
|
||||||
logger.Info("configuring server automatic mTLS")
|
|
||||||
clientCertPool := x509.NewCertPool()
|
|
||||||
if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) {
|
|
||||||
logger.Error("client cert provided but failed to parse", "cert", clientCert)
|
|
||||||
}
|
|
||||||
|
|
||||||
certPEM, keyPEM, err := generateCert()
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("failed to generate server certificate", "error", err)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("failed to parse server certificate", "error", err)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsConfig = &tls.Config{
|
|
||||||
Certificates: []tls.Certificate{cert},
|
|
||||||
ClientAuth: tls.RequireAndVerifyClientCert,
|
|
||||||
ClientCAs: clientCertPool,
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
RootCAs: clientCertPool,
|
|
||||||
ServerName: "localhost",
|
|
||||||
}
|
|
||||||
|
|
||||||
// We send back the raw leaf cert data for the client rather than the
|
|
||||||
// PEM, since the protocol can't handle newlines.
|
|
||||||
serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the channel to tell us when we're done
|
|
||||||
doneCh := make(chan struct{})
|
|
||||||
|
|
||||||
// Create our new stdout, stderr files. These will override our built-in
|
|
||||||
// stdout/stderr so that it works across the stream boundary.
|
|
||||||
var stdout_r, stderr_r io.Reader
|
|
||||||
stdout_r, stdout_w, err := os.Pipe()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
stderr_r, stderr_w, err := os.Pipe()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're in test mode, we tee off the reader and write the data
|
|
||||||
// as-is to our normal Stdout and Stderr so that they continue working
|
|
||||||
// while stdio works. This is because in test mode, we assume we're running
|
|
||||||
// in `go test` or some equivalent and we want output to go to standard
|
|
||||||
// locations.
|
|
||||||
if opts.Test != nil {
|
|
||||||
// TODO(mitchellh): This isn't super ideal because a TeeReader
|
|
||||||
// only works if the reader side is actively read. If we never
|
|
||||||
// connect via a plugin client, the output still gets swallowed.
|
|
||||||
stdout_r = io.TeeReader(stdout_r, os.Stdout)
|
|
||||||
stderr_r = io.TeeReader(stderr_r, os.Stderr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the server type
|
|
||||||
var server ServerProtocol
|
|
||||||
switch protoType {
|
|
||||||
case ProtocolNetRPC:
|
|
||||||
// If we have a TLS configuration then we wrap the listener
|
|
||||||
// ourselves and do it at that level.
|
|
||||||
if tlsConfig != nil {
|
|
||||||
listener = tls.NewListener(listener, tlsConfig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the RPC server to dispense
|
|
||||||
server = &RPCServer{
|
|
||||||
Plugins: pluginSet,
|
|
||||||
Stdout: stdout_r,
|
|
||||||
Stderr: stderr_r,
|
|
||||||
DoneCh: doneCh,
|
|
||||||
}
|
|
||||||
|
|
||||||
case ProtocolGRPC:
|
|
||||||
// Create the gRPC server
|
|
||||||
server = &GRPCServer{
|
|
||||||
Plugins: pluginSet,
|
|
||||||
Server: opts.GRPCServer,
|
|
||||||
TLS: tlsConfig,
|
|
||||||
Stdout: stdout_r,
|
|
||||||
Stderr: stderr_r,
|
|
||||||
			DoneCh: doneCh,
			logger: logger,
		}

	default:
		panic("unknown server protocol: " + protoType)
	}

	// Initialize the servers
	if err := server.Init(); err != nil {
		logger.Error("protocol init", "error", err)
		return
	}

	logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())

	// Output the address and service name to stdout so that the client can
	// bring it up. In test mode, we don't do this because clients will
	// attach via a reattach config.
	if opts.Test == nil {
		fmt.Printf("%d|%d|%s|%s|%s|%s\n",
			CoreProtocolVersion,
			protoVersion,
			listener.Addr().Network(),
			listener.Addr().String(),
			protoType,
			serverCert)
		os.Stdout.Sync()
	} else if ch := opts.Test.ReattachConfigCh; ch != nil {
		// Send back the reattach config that can be used. This isn't
		// quite ready if they connect immediately but the client should
		// retry a few times.
		ch <- &ReattachConfig{
			Protocol:        protoType,
			ProtocolVersion: protoVersion,
			Addr:            listener.Addr(),
			Pid:             os.Getpid(),
			Test:            true,
		}
	}

	// Eat the interrupts. In test mode we disable this so that go test
	// can be cancelled properly.
	if opts.Test == nil {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, os.Interrupt)
		go func() {
			count := 0
			for {
				<-ch
				count++
				logger.Trace("plugin received interrupt signal, ignoring", "count", count)
			}
		}()
	}

	// Set our stdout, stderr to the stdio stream that clients can retrieve
	// using ClientConfig.SyncStdout/err. We only do this for non-test mode
	// or if the test mode explicitly requests it.
	//
	// In test mode, we use a multiwriter so that the data continues going
	// to the normal stdout/stderr so output can show up in test logs. We
	// also send to the stdio stream so that clients can continue working
	// if they depend on that.
	if opts.Test == nil || opts.Test.SyncStdio {
		if opts.Test != nil {
			// In test mode we need to maintain the original values so we can
			// reset it.
			defer func(out, err *os.File) {
				os.Stdout = out
				os.Stderr = err
			}(os.Stdout, os.Stderr)
		}
		os.Stdout = stdout_w
		os.Stderr = stderr_w
	}

	// Accept connections and wait for completion
	go server.Serve(listener)

	ctx := context.Background()
	if opts.Test != nil && opts.Test.Context != nil {
		ctx = opts.Test.Context
	}
	select {
	case <-ctx.Done():
		// Cancellation. We can stop the server by closing the listener.
		// This isn't graceful at all but this is currently only used by
		// tests and its our only way to stop.
		listener.Close()

		// If this is a grpc server, then we also ask the server itself to
		// end which will kill all connections. There isn't an easy way to do
		// this for net/rpc currently but net/rpc is more and more unused.
		if s, ok := server.(*GRPCServer); ok {
			s.Stop()
		}

		// Wait for the server itself to shut down
		<-doneCh

	case <-doneCh:
		// Note that given the documentation of Serve we should probably be
		// setting exitCode = 0 and using os.Exit here. That's how it used to
		// work before extracting this library. However, for years we've done
		// this so we'll keep this functionality.
	}
}
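For reference, the fmt.Printf handshake above emits a single stdout line of the form CORE-PROTOCOL-VERSION|APP-PROTOCOL-VERSION|NETWORK|ADDRESS|PROTOCOL|TLS-CERT (the last field may be empty). A minimal sketch of how a host process could split such a line; parseHandshakeLine is an illustrative helper, not part of go-plugin:

package main

import (
	"fmt"
	"strings"
)

// parseHandshakeLine is a hypothetical helper: it splits the pipe-delimited
// handshake line printed by Serve into its six fields.
func parseHandshakeLine(line string) ([]string, error) {
	parts := strings.Split(strings.TrimSpace(line), "|")
	if len(parts) != 6 {
		return nil, fmt.Errorf("expected 6 handshake fields, got %d", len(parts))
	}
	return parts, nil
}

func main() {
	// Example line as Serve would print it for a gRPC plugin with no TLS cert.
	fields, err := parseHandshakeLine("1|1|unix|/tmp/plugin123|grpc|\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(fields[2], fields[3]) // network and address of the plugin listener
}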
func serverListener() (net.Listener, error) {
	if runtime.GOOS == "windows" {
		return serverListener_tcp()
	}

	return serverListener_unix()
}

func serverListener_tcp() (net.Listener, error) {
	envMinPort := os.Getenv("PLUGIN_MIN_PORT")
	envMaxPort := os.Getenv("PLUGIN_MAX_PORT")

	var minPort, maxPort int64
	var err error

	switch {
	case len(envMinPort) == 0:
		minPort = 0
	default:
		minPort, err = strconv.ParseInt(envMinPort, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
		}
	}

	switch {
	case len(envMaxPort) == 0:
		maxPort = 0
	default:
		maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
		}
	}

	if minPort > maxPort {
		return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
	}

	for port := minPort; port <= maxPort; port++ {
		address := fmt.Sprintf("127.0.0.1:%d", port)
		listener, err := net.Listen("tcp", address)
		if err == nil {
			return listener, nil
		}
	}

	return nil, errors.New("Couldn't bind plugin TCP listener")
}

func serverListener_unix() (net.Listener, error) {
	tf, err := ioutil.TempFile("", "plugin")
	if err != nil {
		return nil, err
	}
	path := tf.Name()

	// Close the file and remove it because it has to not exist for
	// the domain socket.
	if err := tf.Close(); err != nil {
		return nil, err
	}
	if err := os.Remove(path); err != nil {
		return nil, err
	}

	l, err := net.Listen("unix", path)
	if err != nil {
		return nil, err
	}

	// Wrap the listener in rmListener so that the Unix domain socket file
	// is removed on close.
	return &rmListener{
		Listener: l,
		Path:     path,
	}, nil
}

// rmListener is an implementation of net.Listener that forwards most
// calls to the listener but also removes a file as part of the close. We
// use this to cleanup the unix domain socket on close.
type rmListener struct {
	net.Listener
	Path string
}

func (l *rmListener) Close() error {
	// Close the listener itself
	if err := l.Listener.Close(); err != nil {
		return err
	}

	// Remove the file
	return os.Remove(l.Path)
}
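serverListener_tcp above reads PLUGIN_MIN_PORT and PLUGIN_MAX_PORT and tries each port in that inclusive range on 127.0.0.1 until one binds. A hedged sketch of a host constraining a plugin subprocess to a port range on platforms where the TCP listener is used (the ./my-plugin binary name is a placeholder):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Hypothetical example: restrict the plugin's TCP listener (used when
	// serverListener falls back to TCP, e.g. on Windows) to ports 10000-10100.
	cmd := exec.Command("./my-plugin") // placeholder binary name
	cmd.Env = append(os.Environ(),
		"PLUGIN_MIN_PORT=10000",
		"PLUGIN_MAX_PORT=10100",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}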
31 vendor/github.com/hashicorp/go-plugin/server_mux.go generated vendored
@@ -1,31 +0,0 @@
package plugin

import (
	"fmt"
	"os"
)

// ServeMuxMap is the type that is used to configure ServeMux
type ServeMuxMap map[string]*ServeConfig

// ServeMux is like Serve, but serves multiple types of plugins determined
// by the argument given on the command-line.
//
// This command doesn't return until the plugin is done being executed. Any
// errors are logged or output to stderr.
func ServeMux(m ServeMuxMap) {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr,
			"Invoked improperly. This is an internal command that shouldn't\n"+
				"be manually invoked.\n")
		os.Exit(1)
	}

	opts, ok := m[os.Args[1]]
	if !ok {
		fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
		os.Exit(1)
	}

	Serve(opts)
}
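ServeMux dispatches on os.Args[1], so a single binary can host several plugin types. A minimal sketch, assuming the ServeConfig values are built elsewhere; kvConfig and loggerConfig are illustrative placeholders, not part of go-plugin:

package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

// kvConfig and loggerConfig stand in for real *plugin.ServeConfig values
// (handshake config, plugin set, and so on) constructed elsewhere.
var kvConfig, loggerConfig *plugin.ServeConfig

func main() {
	// Invoked as "./this-binary kv" or "./this-binary logger"; ServeMux picks
	// the matching ServeConfig and calls Serve with it. Real configs must be
	// supplied before this is run.
	plugin.ServeMux(plugin.ServeMuxMap{
		"kv":     kvConfig,
		"logger": loggerConfig,
	})
}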
18 vendor/github.com/hashicorp/go-plugin/stream.go generated vendored
@@ -1,18 +0,0 @@
package plugin

import (
	"io"
	"log"
)

func copyStream(name string, dst io.Writer, src io.Reader) {
	if src == nil {
		panic(name + ": src is nil")
	}
	if dst == nil {
		panic(name + ": dst is nil")
	}
	if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
		log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
	}
}
180 vendor/github.com/hashicorp/go-plugin/testing.go generated vendored
@@ -1,180 +0,0 @@
package plugin

import (
	"bytes"
	"context"
	"io"
	"net"
	"net/rpc"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-plugin/internal/plugin"
	"github.com/mitchellh/go-testing-interface"
	"google.golang.org/grpc"
)

// TestOptions allows specifying options that can affect the behavior of the
// test functions
type TestOptions struct {
	// ServerStdout causes the given value to be used in place of a blank buffer
	// for RPCServer's Stdout
	ServerStdout io.ReadCloser

	// ServerStderr causes the given value to be used in place of a blank buffer
	// for RPCServer's Stderr
	ServerStderr io.ReadCloser
}

// The testing file contains test helpers that you can use outside of
// this package for making it easier to test plugins themselves.

// TestConn is a helper function for returning a client and server
// net.Conn connected to each other.
func TestConn(t testing.T) (net.Conn, net.Conn) {
	// Listen to any local port. This listener will be closed
	// after a single connection is established.
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Start a goroutine to accept our client connection
	var serverConn net.Conn
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		defer l.Close()
		var err error
		serverConn, err = l.Accept()
		if err != nil {
			t.Fatalf("err: %s", err)
		}
	}()

	// Connect to the server
	clientConn, err := net.Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Wait for the server side to acknowledge it has connected
	<-doneCh

	return clientConn, serverConn
}

// TestRPCConn returns a rpc client and server connected to each other.
func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
	clientConn, serverConn := TestConn(t)

	server := rpc.NewServer()
	go server.ServeConn(serverConn)

	client := rpc.NewClient(clientConn)
	return client, server
}

// TestPluginRPCConn returns a plugin RPC client and server that are connected
// together and configured.
func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) {
	// Create two net.Conns we can use to shuttle our control connection
	clientConn, serverConn := TestConn(t)

	// Start up the server
	server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
	if opts != nil {
		if opts.ServerStdout != nil {
			server.Stdout = opts.ServerStdout
		}
		if opts.ServerStderr != nil {
			server.Stderr = opts.ServerStderr
		}
	}
	go server.ServeConn(serverConn)

	// Connect the client to the server
	client, err := NewRPCClient(clientConn, ps)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return client, server
}

// TestGRPCConn returns a gRPC client conn and grpc server that are connected
// together and configured. The register function is used to register services
// prior to the Serve call. This is used to test gRPC connections.
func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) {
	// Create a listener
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	server := grpc.NewServer()
	register(server)
	go server.Serve(l)

	// Connect to the server
	conn, err := grpc.Dial(
		l.Addr().String(),
		grpc.WithBlock(),
		grpc.WithInsecure())
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Connection successful, close the listener
	l.Close()

	return conn, server
}

// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
// together and configured. This is used to test gRPC connections.
func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
	// Create a listener
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Start up the server
	server := &GRPCServer{
		Plugins: ps,
		DoneCh:  make(chan struct{}),
		Server:  DefaultGRPCServer,
		Stdout:  new(bytes.Buffer),
		Stderr:  new(bytes.Buffer),
		logger:  hclog.Default(),
	}
	if err := server.Init(); err != nil {
		t.Fatalf("err: %s", err)
	}
	go server.Serve(l)

	// Connect to the server
	conn, err := grpc.Dial(
		l.Addr().String(),
		grpc.WithBlock(),
		grpc.WithInsecure())
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	brokerGRPCClient := newGRPCBrokerClient(conn)
	broker := newGRPCBroker(brokerGRPCClient, nil)
	go broker.Run()
	go brokerGRPCClient.StartStream()

	// Create the client
	client := &GRPCClient{
		Conn:       conn,
		Plugins:    ps,
		broker:     broker,
		doneCtx:    context.Background(),
		controller: plugin.NewGRPCControllerClient(conn),
	}

	return client, server
}
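The helpers above are exported for plugin authors' own tests. A hedged sketch of exercising an RPC plugin pair with TestPluginRPCConn, assuming a KVPlugin type implementing plugin.Plugin exists in the package under test (it is not defined here):

package myplugin

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

func TestKVPlugin_roundTrip(t *testing.T) {
	// KVPlugin is assumed to implement plugin.Plugin; it is a placeholder.
	plugins := map[string]plugin.Plugin{"kv": &KVPlugin{}}

	// TestPluginRPCConn wires an RPCClient and RPCServer over an in-memory
	// TCP pair; nil TestOptions keeps the default stdout/stderr buffers.
	client, _ := plugin.TestPluginRPCConn(t, plugins, nil)
	defer client.Close()

	raw, err := client.Dispense("kv")
	if err != nil {
		t.Fatalf("dispense: %s", err)
	}
	_ = raw // assert on the dispensed interface in a real test
}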
363 vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE generated vendored
@@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
15 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go generated vendored
@@ -1,15 +0,0 @@
package mlock

// This should be set by the OS-specific packages to tell whether LockMemory
// is supported or not.
var supported bool

// Supported returns true if LockMemory is functional on this system.
func Supported() bool {
	return supported
}

// LockMemory prevents any memory from being swapped to disk.
func LockMemory() error {
	return lockMemory()
}
13 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go generated vendored
@@ -1,13 +0,0 @@
// +build android darwin nacl netbsd plan9 windows

package mlock

func init() {
	supported = false
}

func lockMemory() error {
	// XXX: No good way to do this on Windows. There is the VirtualLock
	// method, but it requires a specific address and offset.
	return nil
}
18 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go generated vendored
@@ -1,18 +0,0 @@
// +build dragonfly freebsd linux openbsd solaris

package mlock

import (
	"syscall"

	"golang.org/x/sys/unix"
)

func init() {
	supported = true
}

func lockMemory() error {
	// Mlockall prevents all current and future pages from being swapped out.
	return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE)
}
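Taken together, the three mlock files expose a two-call API: Supported reports whether memory locking is available on the platform, and LockMemory pins the process's current and future pages via mlockall on Unix-like systems. A minimal usage sketch:

package main

import (
	"log"

	"github.com/hashicorp/go-secure-stdlib/mlock"
)

func main() {
	if !mlock.Supported() {
		log.Println("mlock is not supported on this platform; continuing without it")
		return
	}
	// Prevent current and future pages from being swapped to disk; on Linux
	// this typically requires CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.
	if err := mlock.LockMemory(); err != nil {
		log.Fatalf("unable to lock memory: %v", err)
	}
	log.Println("memory locked")
}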
12 vendor/github.com/hashicorp/go-uuid/.travis.yml generated vendored
@@ -1,12 +0,0 @@
language: go

sudo: false

go:
- 1.4
- 1.5
- 1.6
- tip

script:
- go test -bench . -benchmem -v ./...
363 vendor/github.com/hashicorp/go-uuid/LICENSE generated vendored
@@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
8 vendor/github.com/hashicorp/go-uuid/README.md generated vendored
@@ -1,8 +0,0 @@
# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)

Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.

Documentation
=============

The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
83 vendor/github.com/hashicorp/go-uuid/uuid.go generated vendored
@@ -1,83 +0,0 @@
package uuid

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
)

// GenerateRandomBytes is used to generate random bytes of given size.
func GenerateRandomBytes(size int) ([]byte, error) {
	return GenerateRandomBytesWithReader(size, rand.Reader)
}

// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader.
func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) {
	if reader == nil {
		return nil, fmt.Errorf("provided reader is nil")
	}
	buf := make([]byte, size)
	if _, err := io.ReadFull(reader, buf); err != nil {
		return nil, fmt.Errorf("failed to read random bytes: %v", err)
	}
	return buf, nil
}

const uuidLen = 16

// GenerateUUID is used to generate a random UUID
func GenerateUUID() (string, error) {
	return GenerateUUIDWithReader(rand.Reader)
}

// GenerateUUIDWithReader is used to generate a random UUID with a given Reader
func GenerateUUIDWithReader(reader io.Reader) (string, error) {
	if reader == nil {
		return "", fmt.Errorf("provided reader is nil")
	}
	buf, err := GenerateRandomBytesWithReader(uuidLen, reader)
	if err != nil {
		return "", err
	}
	return FormatUUID(buf)
}

func FormatUUID(buf []byte) (string, error) {
	if buflen := len(buf); buflen != uuidLen {
		return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
	}

	return fmt.Sprintf("%x-%x-%x-%x-%x",
		buf[0:4],
		buf[4:6],
		buf[6:8],
		buf[8:10],
		buf[10:16]), nil
}

func ParseUUID(uuid string) ([]byte, error) {
	if len(uuid) != 2*uuidLen+4 {
		return nil, fmt.Errorf("uuid string is wrong length")
	}

	if uuid[8] != '-' ||
		uuid[13] != '-' ||
		uuid[18] != '-' ||
		uuid[23] != '-' {
		return nil, fmt.Errorf("uuid is improperly formatted")
	}

	hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]

	ret, err := hex.DecodeString(hexStr)
	if err != nil {
		return nil, err
	}
	if len(ret) != uuidLen {
		return nil, fmt.Errorf("decoded hex is the wrong length")
	}

	return ret, nil
}
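A short usage sketch for the go-uuid API removed above: generate a UUID-format string, then round-trip it through ParseUUID and FormatUUID.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-uuid"
)

func main() {
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatalf("generate: %v", err)
	}

	raw, err := uuid.ParseUUID(id) // 16 raw bytes
	if err != nil {
		log.Fatalf("parse: %v", err)
	}

	back, err := uuid.FormatUUID(raw)
	if err != nil {
		log.Fatalf("format: %v", err)
	}
	fmt.Println(id == back) // true: formatting the parsed bytes is lossless
}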
13 vendor/github.com/hashicorp/go-version/.travis.yml generated vendored
@@ -1,13 +0,0 @@
language: go

go:
- 1.2
- 1.3
- 1.4
- 1.9
- "1.10"
- 1.11
- 1.12

script:
- go test
354 vendor/github.com/hashicorp/go-version/LICENSE generated vendored
@@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
|
|
||||||
|
|
||||||
|
|
||||||
2. License Grants and Conditions
|
|
||||||
|
|
||||||
2.1. Grants
|
|
||||||
|
|
||||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
|
||||||
non-exclusive license:
|
|
||||||
|
|
||||||
a. under intellectual property rights (other than patent or trademark)
|
|
||||||
Licensable by such Contributor to use, reproduce, make available,
|
|
||||||
modify, display, perform, distribute, and otherwise exploit its
|
|
||||||
Contributions, either on an unmodified basis, with Modifications, or as
|
|
||||||
part of a Larger Work; and
|
|
||||||
|
|
||||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
|
||||||
sale, have made, import, and otherwise transfer either its Contributions
|
|
||||||
or its Contributor Version.
|
|
||||||
|
|
||||||
2.2. Effective Date
|
|
||||||
|
|
||||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
|
||||||
effective for each Contribution on the date the Contributor first distributes
|
|
||||||
such Contribution.
|
|
||||||
|
|
||||||
2.3. Limitations on Grant Scope
|
|
||||||
|
|
||||||
The licenses granted in this Section 2 are the only rights granted under this
|
|
||||||
License. No additional rights or licenses will be implied from the distribution
|
|
||||||
or licensing of Covered Software under this License. Notwithstanding Section
|
|
||||||
2.1(b) above, no patent license is granted by a Contributor:
|
|
||||||
|
|
||||||
a. for any code that a Contributor has removed from Covered Software; or
|
|
||||||
|
|
||||||
b. for infringements caused by: (i) Your and any other third party’s
|
|
||||||
modifications of Covered Software, or (ii) the combination of its
|
|
||||||
Contributions with other software (except as part of its Contributor
|
|
||||||
Version); or
|
|
||||||
|
|
||||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
|
||||||
Contributions.
|
|
||||||
|
|
||||||
This License does not grant any rights in the trademarks, service marks, or
|
|
||||||
logos of any Contributor (except as may be necessary to comply with the
|
|
||||||
notice requirements in Section 3.4).
|
|
||||||
|
|
||||||
2.4. Subsequent Licenses
|
|
||||||
|
|
||||||
No Contributor makes additional grants as a result of Your choice to
|
|
||||||
distribute the Covered Software under a subsequent version of this License
|
|
||||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
|
||||||
under the terms of Section 3.3).
|
|
||||||
|
|
||||||
2.5. Representation
|
|
||||||
|
|
||||||
Each Contributor represents that the Contributor believes its Contributions
|
|
||||||
are its original creation(s) or it has sufficient rights to grant the
|
|
||||||
rights to its Contributions conveyed by this License.
|
|
||||||
|
|
||||||
2.6. Fair Use
|
|
||||||
|
|
||||||
This License is not intended to limit any rights You have under applicable
|
|
||||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
|
||||||
|
|
||||||
2.7. Conditions
|
|
||||||
|
|
||||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
|
||||||
Section 2.1.
|
|
||||||
|
|
||||||
|
|
||||||
3. Responsibilities
|
|
||||||
|
|
||||||
3.1. Distribution of Source Form
|
|
||||||
|
|
||||||
All distribution of Covered Software in Source Code Form, including any
|
|
||||||
Modifications that You create or to which You contribute, must be under the
|
|
||||||
terms of this License. You must inform recipients that the Source Code Form
|
|
||||||
of the Covered Software is governed by the terms of this License, and how
|
|
||||||
they can obtain a copy of this License. You may not attempt to alter or
|
|
||||||
restrict the recipients’ rights in the Source Code Form.
|
|
||||||
|
|
||||||
3.2. Distribution of Executable Form
|
|
||||||
|
|
||||||
If You distribute Covered Software in Executable Form then:
|
|
||||||
|
|
||||||
a. such Covered Software must also be made available in Source Code Form,
|
|
||||||
as described in Section 3.1, and You must inform recipients of the
|
|
||||||
Executable Form how they can obtain a copy of such Source Code Form by
|
|
||||||
reasonable means in a timely manner, at a charge no more than the cost
|
|
||||||
of distribution to the recipient; and
|
|
||||||
|
|
||||||
b. You may distribute such Executable Form under the terms of this License,
|
|
||||||
or sublicense it under different terms, provided that the license for
|
|
||||||
the Executable Form does not attempt to limit or alter the recipients’
|
|
||||||
rights in the Source Code Form under this License.
|
|
||||||
|
|
||||||
3.3. Distribution of a Larger Work
|
|
||||||
|
|
||||||
You may create and distribute a Larger Work under terms of Your choice,
|
|
||||||
provided that You also comply with the requirements of this License for the
|
|
||||||
Covered Software. If the Larger Work is a combination of Covered Software
|
|
||||||
with a work governed by one or more Secondary Licenses, and the Covered
|
|
||||||
Software is not Incompatible With Secondary Licenses, this License permits
|
|
||||||
You to additionally distribute such Covered Software under the terms of
|
|
||||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
|
||||||
their option, further distribute the Covered Software under the terms of
|
|
||||||
either this License or such Secondary License(s).
|
|
||||||
|
|
||||||
3.4. Notices
|
|
||||||
|
|
||||||
You may not remove or alter the substance of any license notices (including
|
|
||||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
|
||||||
of liability) contained within the Source Code Form of the Covered
|
|
||||||
Software, except that You may alter any license notices to the extent
|
|
||||||
required to remedy known factual inaccuracies.
|
|
||||||
|
|
||||||
3.5. Application of Additional Terms
|
|
||||||
|
|
||||||
You may choose to offer, and to charge a fee for, warranty, support,
|
|
||||||
indemnity or liability obligations to one or more recipients of Covered
|
|
||||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
|
||||||
of any Contributor. You must make it absolutely clear that any such
|
|
||||||
warranty, support, indemnity, or liability obligation is offered by You
|
|
||||||
alone, and You hereby agree to indemnify every Contributor for any
|
|
||||||
liability incurred by such Contributor as a result of warranty, support,
|
|
||||||
indemnity or liability terms You offer. You may include additional
|
|
||||||
disclaimers of warranty and limitations of liability specific to any
|
|
||||||
jurisdiction.
|
|
||||||
|
|
||||||
4. Inability to Comply Due to Statute or Regulation
|
|
||||||
|
|
||||||
If it is impossible for You to comply with any of the terms of this License
|
|
||||||
with respect to some or all of the Covered Software due to statute, judicial
|
|
||||||
order, or regulation then You must: (a) comply with the terms of this License
|
|
||||||
to the maximum extent possible; and (b) describe the limitations and the code
|
|
||||||
they affect. Such description must be placed in a text file included with all
|
|
||||||
distributions of the Covered Software under this License. Except to the
|
|
||||||
extent prohibited by statute or regulation, such description must be
|
|
||||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
|
||||||
understand it.
|
|
||||||
|
|
||||||
5. Termination
|
|
||||||
|
|
||||||
5.1. The rights granted under this License will terminate automatically if You
|
|
||||||
fail to comply with any of its terms. However, if You become compliant,
|
|
||||||
then the rights granted under this License from a particular Contributor
|
|
||||||
are reinstated (a) provisionally, unless and until such Contributor
|
|
||||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
|
||||||
if such Contributor fails to notify You of the non-compliance by some
|
|
||||||
reasonable means prior to 60 days after You have come back into compliance.
|
|
||||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
|
||||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
|
||||||
some reasonable means, this is the first time You have received notice of
|
|
||||||
non-compliance with this License from such Contributor, and You become
|
|
||||||
compliant prior to 30 days after Your receipt of the notice.
|
|
||||||
|
|
||||||
5.2. If You initiate litigation against any entity by asserting a patent
|
|
||||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
|
||||||
and cross-claims) alleging that a Contributor Version directly or
|
|
||||||
indirectly infringes any patent, then the rights granted to You by any and
|
|
||||||
all Contributors for the Covered Software under Section 2.1 of this License
|
|
||||||
shall terminate.
|
|
||||||
|
|
||||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
|
||||||
license agreements (excluding distributors and resellers) which have been
|
|
||||||
validly granted by You or Your distributors under this License prior to
|
|
||||||
termination shall survive termination.
|
|
||||||
|
|
||||||
6. Disclaimer of Warranty
|
|
||||||
|
|
||||||
Covered Software is provided under this License on an “as is” basis, without
|
|
||||||
warranty of any kind, either expressed, implied, or statutory, including,
|
|
||||||
without limitation, warranties that the Covered Software is free of defects,
|
|
||||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
|
||||||
risk as to the quality and performance of the Covered Software is with You.
|
|
||||||
Should any Covered Software prove defective in any respect, You (not any
|
|
||||||
Contributor) assume the cost of any necessary servicing, repair, or
|
|
||||||
correction. This disclaimer of warranty constitutes an essential part of this
|
|
||||||
License. No use of any Covered Software is authorized under this License
|
|
||||||
except under this disclaimer.
|
|
||||||
|
|
||||||
7. Limitation of Liability
|
|
||||||
|
|
||||||
Under no circumstances and under no legal theory, whether tort (including
|
|
||||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
|
||||||
distributes Covered Software as permitted above, be liable to You for any
|
|
||||||
direct, indirect, special, incidental, or consequential damages of any
|
|
||||||
character including, without limitation, damages for lost profits, loss of
|
|
||||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses, even if such party shall have been
|
|
||||||
informed of the possibility of such damages. This limitation of liability
|
|
||||||
shall not apply to liability for death or personal injury resulting from such
|
|
||||||
party’s negligence to the extent applicable law prohibits such limitation.
|
|
||||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
|
||||||
consequential damages, so this exclusion and limitation may not apply to You.
|
|
||||||
|
|
||||||
8. Litigation
|
|
||||||
|
|
||||||
Any litigation relating to this License may be brought only in the courts of
|
|
||||||
a jurisdiction where the defendant maintains its principal place of business
|
|
||||||
and such litigation shall be governed by laws of that jurisdiction, without
|
|
||||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
|
||||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
|
||||||
|
|
||||||
9. Miscellaneous
|
|
||||||
|
|
||||||
This License represents the complete agreement concerning the subject matter
|
|
||||||
hereof. If any provision of this License is held to be unenforceable, such
|
|
||||||
provision shall be reformed only to the extent necessary to make it
|
|
||||||
enforceable. Any law or regulation which provides that the language of a
|
|
||||||
contract shall be construed against the drafter shall not be used to construe
|
|
||||||
this License against a Contributor.
|
|
||||||
|
|
||||||
|
|
||||||
10. Versions of the License
|
|
||||||
|
|
||||||
10.1. New Versions
|
|
||||||
|
|
||||||
Mozilla Foundation is the license steward. Except as provided in Section
|
|
||||||
10.3, no one other than the license steward has the right to modify or
|
|
||||||
publish new versions of this License. Each version will be given a
|
|
||||||
distinguishing version number.
|
|
||||||
|
|
||||||
10.2. Effect of New Versions
|
|
||||||
|
|
||||||
You may distribute the Covered Software under the terms of the version of
|
|
||||||
the License under which You originally received the Covered Software, or
|
|
||||||
under the terms of any subsequent version published by the license
|
|
||||||
steward.
|
|
||||||
|
|
||||||
10.3. Modified Versions
|
|
||||||
|
|
||||||
If you create software not governed by this License, and you want to
|
|
||||||
create a new license for such software, you may create and use a modified
|
|
||||||
version of this License if you rename the license and remove any
|
|
||||||
references to the name of the license steward (except to note that such
|
|
||||||
modified license differs from this License).
|
|
||||||
|
|
||||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
|
||||||
If You choose to distribute Source Code Form that is Incompatible With
|
|
||||||
Secondary Licenses under the terms of this version of the License, the
|
|
||||||
notice described in Exhibit B of this License must be attached.
|
|
||||||
|
|
||||||
Exhibit A - Source Code Form License Notice
|
|
||||||
|
|
||||||
This Source Code Form is subject to the
|
|
||||||
terms of the Mozilla Public License, v.
|
|
||||||
2.0. If a copy of the MPL was not
|
|
||||||
distributed with this file, You can
|
|
||||||
obtain one at
|
|
||||||
http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
If it is not possible or desirable to put the notice in a particular file, then
|
|
||||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
|
||||||
directory) where a recipient would be likely to look for such a notice.
|
|
||||||
|
|
||||||
You may add additional accurate notices of copyright ownership.
|
|
||||||
|
|
||||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
|
||||||
|
|
||||||
This Source Code Form is “Incompatible
|
|
||||||
With Secondary Licenses”, as defined by
|
|
||||||
the Mozilla Public License, v. 2.0.
|
|
||||||
|
|
65 vendor/github.com/hashicorp/go-version/README.md generated vendored
@@ -1,65 +0,0 @@
# Versioning Library for Go
[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)

go-version is a library for parsing versions and version constraints,
and verifying versions against a set of constraints. go-version
can sort a collection of versions properly, handles prerelease/beta
versions, can increment versions, etc.

Versions used with go-version must follow [SemVer](http://semver.org/).

## Installation and Usage

Package documentation can be found on
[GoDoc](http://godoc.org/github.com/hashicorp/go-version).

Installation can be done with a normal `go get`:

```
$ go get github.com/hashicorp/go-version
```

#### Version Parsing and Comparison

```go
v1, err := version.NewVersion("1.2")
v2, err := version.NewVersion("1.5+metadata")

// Comparison example. There is also GreaterThan, Equal, and just
// a simple Compare that returns an int allowing easy >=, <=, etc.
if v1.LessThan(v2) {
	fmt.Printf("%s is less than %s", v1, v2)
}
```

#### Version Constraints

```go
v1, err := version.NewVersion("1.2")

// Constraints example.
constraints, err := version.NewConstraint(">= 1.0, < 1.4")
if constraints.Check(v1) {
	fmt.Printf("%s satisfies constraints %s", v1, constraints)
}
```

#### Version Sorting

```go
versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
versions := make([]*version.Version, len(versionsRaw))
for i, raw := range versionsRaw {
	v, _ := version.NewVersion(raw)
	versions[i] = v
}

// After this, the versions are properly sorted
sort.Sort(version.Collection(versions))
```

## Issues and Contributing

If you find an issue with this library, please report an issue. If you'd
like, we welcome any contributions. Fork this library and submit a pull
request.
204 vendor/github.com/hashicorp/go-version/constraint.go generated vendored
@@ -1,204 +0,0 @@
package version
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Constraint represents a single constraint for a version, such as
|
|
||||||
// ">= 1.0".
|
|
||||||
type Constraint struct {
|
|
||||||
f constraintFunc
|
|
||||||
check *Version
|
|
||||||
original string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Constraints is a slice of constraints. We make a custom type so that
|
|
||||||
// we can add methods to it.
|
|
||||||
type Constraints []*Constraint
|
|
||||||
|
|
||||||
type constraintFunc func(v, c *Version) bool
|
|
||||||
|
|
||||||
var constraintOperators map[string]constraintFunc
|
|
||||||
|
|
||||||
var constraintRegexp *regexp.Regexp
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
constraintOperators = map[string]constraintFunc{
|
|
||||||
"": constraintEqual,
|
|
||||||
"=": constraintEqual,
|
|
||||||
"!=": constraintNotEqual,
|
|
||||||
">": constraintGreaterThan,
|
|
||||||
"<": constraintLessThan,
|
|
||||||
">=": constraintGreaterThanEqual,
|
|
||||||
"<=": constraintLessThanEqual,
|
|
||||||
"~>": constraintPessimistic,
|
|
||||||
}
|
|
||||||
|
|
||||||
ops := make([]string, 0, len(constraintOperators))
|
|
||||||
for k := range constraintOperators {
|
|
||||||
ops = append(ops, regexp.QuoteMeta(k))
|
|
||||||
}
|
|
||||||
|
|
||||||
constraintRegexp = regexp.MustCompile(fmt.Sprintf(
|
|
||||||
`^\s*(%s)\s*(%s)\s*$`,
|
|
||||||
strings.Join(ops, "|"),
|
|
||||||
VersionRegexpRaw))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConstraint will parse one or more constraints from the given
|
|
||||||
// constraint string. The string must be a comma-separated list of
|
|
||||||
// constraints.
|
|
||||||
func NewConstraint(v string) (Constraints, error) {
|
|
||||||
vs := strings.Split(v, ",")
|
|
||||||
result := make([]*Constraint, len(vs))
|
|
||||||
for i, single := range vs {
|
|
||||||
c, err := parseSingle(single)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result[i] = c
|
|
||||||
}
|
|
||||||
|
|
||||||
return Constraints(result), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check tests if a version satisfies all the constraints.
|
|
||||||
func (cs Constraints) Check(v *Version) bool {
|
|
||||||
for _, c := range cs {
|
|
||||||
if !c.Check(v) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the string format of the constraints
|
|
||||||
func (cs Constraints) String() string {
|
|
||||||
csStr := make([]string, len(cs))
|
|
||||||
for i, c := range cs {
|
|
||||||
csStr[i] = c.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Join(csStr, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check tests if a constraint is validated by the given version.
|
|
||||||
func (c *Constraint) Check(v *Version) bool {
|
|
||||||
return c.f(v, c.check)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Constraint) String() string {
|
|
||||||
return c.original
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSingle(v string) (*Constraint, error) {
|
|
||||||
matches := constraintRegexp.FindStringSubmatch(v)
|
|
||||||
if matches == nil {
|
|
||||||
return nil, fmt.Errorf("Malformed constraint: %s", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
check, err := NewVersion(matches[2])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Constraint{
|
|
||||||
f: constraintOperators[matches[1]],
|
|
||||||
check: check,
|
|
||||||
original: v,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func prereleaseCheck(v, c *Version) bool {
|
|
||||||
switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
|
|
||||||
case cPre && vPre:
|
|
||||||
// A constraint with a pre-release can only match a pre-release version
|
|
||||||
// with the same base segments.
|
|
||||||
return reflect.DeepEqual(c.Segments64(), v.Segments64())
|
|
||||||
|
|
||||||
case !cPre && vPre:
|
|
||||||
// A constraint without a pre-release can only match a version without a
|
|
||||||
// pre-release.
|
|
||||||
return false
|
|
||||||
|
|
||||||
case cPre && !vPre:
|
|
||||||
// OK, except with the pessimistic operator
|
|
||||||
case !cPre && !vPre:
|
|
||||||
// OK
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
//-------------------------------------------------------------------
|
|
||||||
// Constraint functions
|
|
||||||
//-------------------------------------------------------------------
|
|
||||||
|
|
||||||
func constraintEqual(v, c *Version) bool {
|
|
||||||
return v.Equal(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintNotEqual(v, c *Version) bool {
|
|
||||||
return !v.Equal(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintGreaterThan(v, c *Version) bool {
|
|
||||||
return prereleaseCheck(v, c) && v.Compare(c) == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintLessThan(v, c *Version) bool {
|
|
||||||
return prereleaseCheck(v, c) && v.Compare(c) == -1
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintGreaterThanEqual(v, c *Version) bool {
|
|
||||||
return prereleaseCheck(v, c) && v.Compare(c) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintLessThanEqual(v, c *Version) bool {
|
|
||||||
return prereleaseCheck(v, c) && v.Compare(c) <= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintPessimistic(v, c *Version) bool {
|
|
||||||
// Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
|
|
||||||
if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the version being checked is naturally less than the constraint, then there
|
|
||||||
// is no way for the version to be valid against the constraint
|
|
||||||
if v.LessThan(c) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// We'll use this more than once, so grab the length now so it's a little cleaner
|
|
||||||
// to write the later checks
|
|
||||||
cs := len(c.segments)
|
|
||||||
|
|
||||||
// If the version being checked has less specificity than the constraint, then there
|
|
||||||
// is no way for the version to be valid against the constraint
|
|
||||||
if cs > len(v.segments) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the segments in the constraint against those in the version. If the version
|
|
||||||
// being checked, at any point, does not have the same values in each index of the
|
|
||||||
// constraints segments, then it cannot be valid against the constraint.
|
|
||||||
for i := 0; i < c.si-1; i++ {
|
|
||||||
if v.segments[i] != c.segments[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the last part of the segment in the constraint. If the version segment at
|
|
||||||
// this index is less than the constraints segment at this index, then it cannot
|
|
||||||
// be valid against the constraint
|
|
||||||
if c.segments[cs-1] > v.segments[cs-1] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// If nothing has rejected the version by now, it's valid
|
|
||||||
return true
|
|
||||||
}
|
|
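The pessimistic operator ("~>") handled by constraintPessimistic above is the least obvious of the operators, so a short usage sketch may help. This is illustrative only and assumes the library is imported from github.com/hashicorp/go-version, as its README suggests.

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// With "~> 1.2" the final specified segment may grow (1.3, 1.9 and
	// 1.2.5 all pass), while the segments before it are pinned, so 2.0 fails.
	c, _ := version.NewConstraint("~> 1.2")

	v13, _ := version.NewVersion("1.3")
	v20, _ := version.NewVersion("2.0")

	fmt.Println(c.Check(v13)) // true
	fmt.Println(c.Check(v20)) // false
}
```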
380 vendor/github.com/hashicorp/go-version/version.go generated vendored
@@ -1,380 +0,0 @@
package version
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The compiled regular expression used to test the validity of a version.
|
|
||||||
var (
|
|
||||||
versionRegexp *regexp.Regexp
|
|
||||||
semverRegexp *regexp.Regexp
|
|
||||||
)
|
|
||||||
|
|
||||||
// The raw regular expression string used for testing the validity
|
|
||||||
// of a version.
|
|
||||||
const (
|
|
||||||
VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
|
|
||||||
`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
|
|
||||||
`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
|
|
||||||
`?`
|
|
||||||
|
|
||||||
// SemverRegexpRaw requires a separator between version and prerelease
|
|
||||||
SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
|
|
||||||
`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
|
|
||||||
`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
|
|
||||||
`?`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Version represents a single version.
|
|
||||||
type Version struct {
|
|
||||||
metadata string
|
|
||||||
pre string
|
|
||||||
segments []int64
|
|
||||||
si int
|
|
||||||
original string
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
|
|
||||||
semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVersion parses the given version and returns a new
|
|
||||||
// Version.
|
|
||||||
func NewVersion(v string) (*Version, error) {
|
|
||||||
return newVersion(v, versionRegexp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSemver parses the given version and returns a new
|
|
||||||
// Version that adheres strictly to SemVer specs
|
|
||||||
// https://semver.org/
|
|
||||||
func NewSemver(v string) (*Version, error) {
|
|
||||||
return newVersion(v, semverRegexp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
|
|
||||||
matches := pattern.FindStringSubmatch(v)
|
|
||||||
if matches == nil {
|
|
||||||
return nil, fmt.Errorf("Malformed version: %s", v)
|
|
||||||
}
|
|
||||||
segmentsStr := strings.Split(matches[1], ".")
|
|
||||||
segments := make([]int64, len(segmentsStr))
|
|
||||||
si := 0
|
|
||||||
for i, str := range segmentsStr {
|
|
||||||
val, err := strconv.ParseInt(str, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"Error parsing version: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
segments[i] = int64(val)
|
|
||||||
si++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Even though we could support more than three segments, if we
|
|
||||||
// got less than three, pad it with 0s. This is to cover the basic
|
|
||||||
// default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum
|
|
||||||
for i := len(segments); i < 3; i++ {
|
|
||||||
segments = append(segments, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
pre := matches[7]
|
|
||||||
if pre == "" {
|
|
||||||
pre = matches[4]
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Version{
|
|
||||||
metadata: matches[10],
|
|
||||||
pre: pre,
|
|
||||||
segments: segments,
|
|
||||||
si: si,
|
|
||||||
original: v,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must is a helper that wraps a call to a function returning (*Version, error)
|
|
||||||
// and panics if error is non-nil.
|
|
||||||
func Must(v *Version, err error) *Version {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compare compares this version to another version. This
|
|
||||||
// returns -1, 0, or 1 if this version is smaller, equal,
|
|
||||||
// or larger than the other version, respectively.
|
|
||||||
//
|
|
||||||
// If you want boolean results, use the LessThan, Equal,
|
|
||||||
// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
|
|
||||||
func (v *Version) Compare(other *Version) int {
|
|
||||||
// A quick, efficient equality check
|
|
||||||
if v.String() == other.String() {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
segmentsSelf := v.Segments64()
|
|
||||||
segmentsOther := other.Segments64()
|
|
||||||
|
|
||||||
// If the segments are the same, we must compare on prerelease info
|
|
||||||
if reflect.DeepEqual(segmentsSelf, segmentsOther) {
|
|
||||||
preSelf := v.Prerelease()
|
|
||||||
preOther := other.Prerelease()
|
|
||||||
if preSelf == "" && preOther == "" {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if preSelf == "" {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if preOther == "" {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return comparePrereleases(preSelf, preOther)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
|
|
||||||
lenSelf := len(segmentsSelf)
|
|
||||||
lenOther := len(segmentsOther)
|
|
||||||
hS := lenSelf
|
|
||||||
if lenSelf < lenOther {
|
|
||||||
hS = lenOther
|
|
||||||
}
|
|
||||||
// Compare the segments
|
|
||||||
// Because a constraint could have more/less specificity than the version it's
|
|
||||||
// checking, we need to account for a lopsided or jagged comparison
|
|
||||||
for i := 0; i < hS; i++ {
|
|
||||||
if i > lenSelf-1 {
|
|
||||||
// This means Self had the lower specificity
|
|
||||||
// Check to see if the remaining segments in Other are all zeros
|
|
||||||
if !allZero(segmentsOther[i:]) {
|
|
||||||
// if not, it means that Other has to be greater than Self
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
break
|
|
||||||
} else if i > lenOther-1 {
|
|
||||||
// this means Other had the lower specificity
|
|
||||||
// Check to see if the remaining segments in Self are all zeros -
|
|
||||||
if !allZero(segmentsSelf[i:]) {
|
|
||||||
//if not, it means that Self has to be greater than Other
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
lhs := segmentsSelf[i]
|
|
||||||
rhs := segmentsOther[i]
|
|
||||||
if lhs == rhs {
|
|
||||||
continue
|
|
||||||
} else if lhs < rhs {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
// Otherwise, rhs was > lhs, they're not equal
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we got this far, they're equal
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func allZero(segs []int64) bool {
|
|
||||||
for _, s := range segs {
|
|
||||||
if s != 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func comparePart(preSelf string, preOther string) int {
|
|
||||||
if preSelf == preOther {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
var selfInt int64
|
|
||||||
selfNumeric := true
|
|
||||||
selfInt, err := strconv.ParseInt(preSelf, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
selfNumeric = false
|
|
||||||
}
|
|
||||||
|
|
||||||
var otherInt int64
|
|
||||||
otherNumeric := true
|
|
||||||
otherInt, err = strconv.ParseInt(preOther, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
otherNumeric = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// if a part is empty, we use the other to decide
|
|
||||||
if preSelf == "" {
|
|
||||||
if otherNumeric {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if preOther == "" {
|
|
||||||
if selfNumeric {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
if selfNumeric && !otherNumeric {
|
|
||||||
return -1
|
|
||||||
} else if !selfNumeric && otherNumeric {
|
|
||||||
return 1
|
|
||||||
} else if !selfNumeric && !otherNumeric && preSelf > preOther {
|
|
||||||
return 1
|
|
||||||
} else if selfInt > otherInt {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
func comparePrereleases(v string, other string) int {
|
|
||||||
// the same pre release!
|
|
||||||
if v == other {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// split both pre-releases to analyse their parts
|
|
||||||
selfPreReleaseMeta := strings.Split(v, ".")
|
|
||||||
otherPreReleaseMeta := strings.Split(other, ".")
|
|
||||||
|
|
||||||
selfPreReleaseLen := len(selfPreReleaseMeta)
|
|
||||||
otherPreReleaseLen := len(otherPreReleaseMeta)
|
|
||||||
|
|
||||||
biggestLen := otherPreReleaseLen
|
|
||||||
if selfPreReleaseLen > otherPreReleaseLen {
|
|
||||||
biggestLen = selfPreReleaseLen
|
|
||||||
}
|
|
||||||
|
|
||||||
// loop for parts to find the first difference
|
|
||||||
for i := 0; i < biggestLen; i = i + 1 {
|
|
||||||
partSelfPre := ""
|
|
||||||
if i < selfPreReleaseLen {
|
|
||||||
partSelfPre = selfPreReleaseMeta[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
partOtherPre := ""
|
|
||||||
if i < otherPreReleaseLen {
|
|
||||||
partOtherPre = otherPreReleaseMeta[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
compare := comparePart(partSelfPre, partOtherPre)
|
|
||||||
// if parts are equals, continue the loop
|
|
||||||
if compare != 0 {
|
|
||||||
return compare
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal tests if two versions are equal.
|
|
||||||
func (v *Version) Equal(o *Version) bool {
|
|
||||||
return v.Compare(o) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GreaterThan tests if this version is greater than another version.
|
|
||||||
func (v *Version) GreaterThan(o *Version) bool {
|
|
||||||
return v.Compare(o) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GreaterThanOrEqual tests if this version is greater than or equal to another version.
|
|
||||||
func (v *Version) GreaterThanOrEqual(o *Version) bool {
|
|
||||||
return v.Compare(o) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThan tests if this version is less than another version.
|
|
||||||
func (v *Version) LessThan(o *Version) bool {
|
|
||||||
return v.Compare(o) < 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThanOrEqual tests if this version is less than or equal to another version.
|
|
||||||
func (v *Version) LessThanOrEqual(o *Version) bool {
|
|
||||||
return v.Compare(o) <= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns any metadata that was part of the version
|
|
||||||
// string.
|
|
||||||
//
|
|
||||||
// Metadata is anything that comes after the "+" in the version.
|
|
||||||
// For example, with "1.2.3+beta", the metadata is "beta".
|
|
||||||
func (v *Version) Metadata() string {
|
|
||||||
return v.metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prerelease returns any prerelease data that is part of the version,
|
|
||||||
// or blank if there is no prerelease data.
|
|
||||||
//
|
|
||||||
// Prerelease information is anything that comes after the "-" in the
|
|
||||||
// version (but before any metadata). For example, with "1.2.3-beta",
|
|
||||||
// the prerelease information is "beta".
|
|
||||||
func (v *Version) Prerelease() string {
|
|
||||||
return v.pre
|
|
||||||
}
|
|
||||||
|
|
||||||
// Segments returns the numeric segments of the version as a slice of ints.
|
|
||||||
//
|
|
||||||
// This excludes any metadata or pre-release information. For example,
|
|
||||||
// for a version "1.2.3-beta", segments will return a slice of
|
|
||||||
// 1, 2, 3.
|
|
||||||
func (v *Version) Segments() []int {
|
|
||||||
segmentSlice := make([]int, len(v.segments))
|
|
||||||
for i, v := range v.segments {
|
|
||||||
segmentSlice[i] = int(v)
|
|
||||||
}
|
|
||||||
return segmentSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
// Segments64 returns the numeric segments of the version as a slice of int64s.
|
|
||||||
//
|
|
||||||
// This excludes any metadata or pre-release information. For example,
|
|
||||||
// for a version "1.2.3-beta", segments will return a slice of
|
|
||||||
// 1, 2, 3.
|
|
||||||
func (v *Version) Segments64() []int64 {
|
|
||||||
result := make([]int64, len(v.segments))
|
|
||||||
copy(result, v.segments)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the full version string including pre-release
|
|
||||||
// and metadata information.
|
|
||||||
//
|
|
||||||
// This value is rebuilt according to the parsed segments and other
|
|
||||||
// information. Therefore, ambiguities in the version string such as
|
|
||||||
// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
|
|
||||||
// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
|
|
||||||
// as shown in the parenthesized examples.
|
|
||||||
func (v *Version) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmtParts := make([]string, len(v.segments))
|
|
||||||
for i, s := range v.segments {
|
|
||||||
// We can ignore err here since we've pre-parsed the values in segments
|
|
||||||
str := strconv.FormatInt(s, 10)
|
|
||||||
fmtParts[i] = str
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
|
|
||||||
if v.pre != "" {
|
|
||||||
fmt.Fprintf(&buf, "-%s", v.pre)
|
|
||||||
}
|
|
||||||
if v.metadata != "" {
|
|
||||||
fmt.Fprintf(&buf, "+%s", v.metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Original returns the original parsed version as-is, including any
|
|
||||||
// potential whitespace, `v` prefix, etc.
|
|
||||||
func (v *Version) Original() string {
|
|
||||||
return v.original
|
|
||||||
}
|
|
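As a quick check of the comparison rules implemented above (pre-release ordering, build metadata being ignored for ordering), here is a small sketch. It is illustrative only, again assuming the usual github.com/hashicorp/go-version import path.

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	release, _ := version.NewVersion("1.2.3")
	beta, _ := version.NewVersion("1.2.3-beta")
	build, _ := version.NewVersion("1.2.3+build.5")

	// A pre-release sorts before the corresponding release.
	fmt.Println(beta.LessThan(release)) // true
	fmt.Println(release.Compare(beta))  // 1

	// Build metadata is kept on the version but ignored for ordering.
	fmt.Println(release.Equal(build)) // true
	fmt.Println(build.Metadata())     // "build.5"
}
```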
17 vendor/github.com/hashicorp/go-version/version_collection.go generated vendored
@@ -1,17 +0,0 @@
package version

// Collection is a type that implements the sort.Interface interface
// so that versions can be sorted.
type Collection []*Version

func (v Collection) Len() int {
	return len(v)
}

func (v Collection) Less(i, j int) bool {
	return v[i].LessThan(v[j])
}

func (v Collection) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}
23 vendor/github.com/hashicorp/golang-lru/.gitignore generated vendored
@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
223 vendor/github.com/hashicorp/golang-lru/2q.go generated vendored
@@ -1,223 +0,0 @@
package lru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
|
|
||||||
// to recently added entries that have only been accessed once.
|
|
||||||
Default2QRecentRatio = 0.25
|
|
||||||
|
|
||||||
// Default2QGhostEntries is the default ratio of ghost
|
|
||||||
// entries kept to track entries recently evicted
|
|
||||||
Default2QGhostEntries = 0.50
|
|
||||||
)
|
|
||||||
|
|
||||||
// TwoQueueCache is a thread-safe fixed size 2Q cache.
|
|
||||||
// 2Q is an enhancement over the standard LRU cache
|
|
||||||
// in that it tracks both frequently and recently used
|
|
||||||
// entries separately. This avoids a burst in access to new
|
|
||||||
// entries from evicting frequently used entries. It adds some
|
|
||||||
// additional tracking overhead to the standard LRU cache, and is
|
|
||||||
// computationally about 2x the cost, and adds some metadata
// overhead. The ARCCache is similar, but does not require setting any
|
|
||||||
// parameters.
|
|
||||||
type TwoQueueCache struct {
|
|
||||||
size int
|
|
||||||
recentSize int
|
|
||||||
|
|
||||||
recent simplelru.LRUCache
|
|
||||||
frequent simplelru.LRUCache
|
|
||||||
recentEvict simplelru.LRUCache
|
|
||||||
lock sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// New2Q creates a new TwoQueueCache using the default
|
|
||||||
// values for the parameters.
|
|
||||||
func New2Q(size int) (*TwoQueueCache, error) {
|
|
||||||
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New2QParams creates a new TwoQueueCache using the provided
|
|
||||||
// parameter values.
|
|
||||||
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
|
|
||||||
if size <= 0 {
|
|
||||||
return nil, fmt.Errorf("invalid size")
|
|
||||||
}
|
|
||||||
if recentRatio < 0.0 || recentRatio > 1.0 {
|
|
||||||
return nil, fmt.Errorf("invalid recent ratio")
|
|
||||||
}
|
|
||||||
if ghostRatio < 0.0 || ghostRatio > 1.0 {
|
|
||||||
return nil, fmt.Errorf("invalid ghost ratio")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine the sub-sizes
|
|
||||||
recentSize := int(float64(size) * recentRatio)
|
|
||||||
evictSize := int(float64(size) * ghostRatio)
|
|
||||||
|
|
||||||
// Allocate the LRUs
|
|
||||||
recent, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
frequent, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
recentEvict, err := simplelru.NewLRU(evictSize, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the cache
|
|
||||||
c := &TwoQueueCache{
|
|
||||||
size: size,
|
|
||||||
recentSize: recentSize,
|
|
||||||
recent: recent,
|
|
||||||
frequent: frequent,
|
|
||||||
recentEvict: recentEvict,
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get looks up a key's value from the cache.
|
|
||||||
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
// Check if this is a frequent value
|
|
||||||
if val, ok := c.frequent.Get(key); ok {
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the value is contained in recent, then we
|
|
||||||
// promote it to frequent
|
|
||||||
if val, ok := c.recent.Peek(key); ok {
|
|
||||||
c.recent.Remove(key)
|
|
||||||
c.frequent.Add(key, val)
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// No hit
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a value to the cache.
|
|
||||||
func (c *TwoQueueCache) Add(key, value interface{}) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
// Check if the value is frequently used already,
|
|
||||||
// and just update the value
|
|
||||||
if c.frequent.Contains(key) {
|
|
||||||
c.frequent.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the value is recently used, and promote
|
|
||||||
// the value into the frequent list
|
|
||||||
if c.recent.Contains(key) {
|
|
||||||
c.recent.Remove(key)
|
|
||||||
c.frequent.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the value was recently evicted, add it to the
|
|
||||||
// frequently used list
|
|
||||||
if c.recentEvict.Contains(key) {
|
|
||||||
c.ensureSpace(true)
|
|
||||||
c.recentEvict.Remove(key)
|
|
||||||
c.frequent.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add to the recently seen list
|
|
||||||
c.ensureSpace(false)
|
|
||||||
c.recent.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureSpace is used to ensure we have space in the cache
|
|
||||||
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
|
|
||||||
// If we have space, nothing to do
|
|
||||||
recentLen := c.recent.Len()
|
|
||||||
freqLen := c.frequent.Len()
|
|
||||||
if recentLen+freqLen < c.size {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the recent buffer is larger than
|
|
||||||
// the target, evict from there
|
|
||||||
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
|
|
||||||
k, _, _ := c.recent.RemoveOldest()
|
|
||||||
c.recentEvict.Add(k, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove from the frequent list otherwise
|
|
||||||
c.frequent.RemoveOldest()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of items in the cache.
|
|
||||||
func (c *TwoQueueCache) Len() int {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
return c.recent.Len() + c.frequent.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of the keys in the cache.
|
|
||||||
// The frequently used keys are first in the returned slice.
|
|
||||||
func (c *TwoQueueCache) Keys() []interface{} {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
k1 := c.frequent.Keys()
|
|
||||||
k2 := c.recent.Keys()
|
|
||||||
return append(k1, k2...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the provided key from the cache.
|
|
||||||
func (c *TwoQueueCache) Remove(key interface{}) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
if c.frequent.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.recent.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.recentEvict.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge is used to completely clear the cache.
|
|
||||||
func (c *TwoQueueCache) Purge() {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
c.recent.Purge()
|
|
||||||
c.frequent.Purge()
|
|
||||||
c.recentEvict.Purge()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains is used to check if the cache contains a key
|
|
||||||
// without updating recency or frequency.
|
|
||||||
func (c *TwoQueueCache) Contains(key interface{}) bool {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
return c.frequent.Contains(key) || c.recent.Contains(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek is used to inspect the cache value of a key
|
|
||||||
// without updating recency or frequency.
|
|
||||||
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
if val, ok := c.frequent.Peek(key); ok {
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
return c.recent.Peek(key)
|
|
||||||
}
|
|
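To make the promotion behaviour of the 2Q cache above concrete, here is a small usage sketch. It is illustrative only and assumes the cache is consumed via the github.com/hashicorp/golang-lru package shown in this vendor path.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// A 2Q cache holding at most 128 entries, split between the
	// "recent" and "frequent" lists according to the default ratios.
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1) // new keys land on the recent list
	cache.Add("b", 2)

	// Get promotes "a" from the recent list to the frequent list.
	if v, ok := cache.Get("a"); ok {
		fmt.Println(v) // 1
	}

	fmt.Println(cache.Len())         // 2
	fmt.Println(cache.Contains("b")) // true
}
```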
362 vendor/github.com/hashicorp/golang-lru/LICENSE generated vendored
@@ -1,362 +0,0 @@
Mozilla Public License, version 2.0
|
|
||||||
|
|
||||||
1. Definitions
|
|
||||||
|
|
||||||
1.1. "Contributor"
|
|
||||||
|
|
||||||
means each individual or legal entity that creates, contributes to the
|
|
||||||
creation of, or owns Covered Software.
|
|
||||||
|
|
||||||
1.2. "Contributor Version"
|
|
||||||
|
|
||||||
means the combination of the Contributions of others (if any) used by a
|
|
||||||
Contributor and that particular Contributor's Contribution.
|
|
||||||
|
|
||||||
1.3. "Contribution"
|
|
||||||
|
|
||||||
means Covered Software of a particular Contributor.
|
|
||||||
|
|
||||||
1.4. "Covered Software"
|
|
||||||
|
|
||||||
means Source Code Form to which the initial Contributor has attached the
|
|
||||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
|
||||||
Modifications of such Source Code Form, in each case including portions
|
|
||||||
thereof.
|
|
||||||
|
|
||||||
1.5. "Incompatible With Secondary Licenses"
|
|
||||||
means
|
|
||||||
|
|
||||||
a. that the initial Contributor has attached the notice described in
|
|
||||||
Exhibit B to the Covered Software; or
|
|
||||||
|
|
||||||
b. that the Covered Software was made available under the terms of
|
|
||||||
version 1.1 or earlier of the License, but not also under the terms of
|
|
||||||
a Secondary License.
|
|
||||||
|
|
||||||
1.6. "Executable Form"
|
|
||||||
|
|
||||||
means any form of the work other than Source Code Form.
|
|
||||||
|
|
||||||
1.7. "Larger Work"
|
|
||||||
|
|
||||||
means a work that combines Covered Software with other material, in a
|
|
||||||
separate file or files, that is not Covered Software.
|
|
||||||
|
|
||||||
1.8. "License"
|
|
||||||
|
|
||||||
means this document.
|
|
||||||
|
|
||||||
1.9. "Licensable"
|
|
||||||
|
|
||||||
means having the right to grant, to the maximum extent possible, whether
|
|
||||||
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.

1.10. "Modifications"

    means any of the following:

    a. any file in Source Code Form that results from an addition to,
       deletion from, or modification of the contents of Covered Software; or

    b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor

    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the License,
    by the making, using, selling, offering for sale, having made, import,
    or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License"

    means either the GNU General Public License, Version 2.0, the GNU Lesser
    General Public License, Version 2.1, the GNU Affero General Public
    License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form"

    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")

    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that controls, is
    controlled by, or is under common control with You. For purposes of this
    definition, "control" means (a) the power, direct or indirect, to cause
    the direction or management of such entity, whether by contract or
    otherwise, or (b) ownership of more than fifty percent (50%) of the
    outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

    Each Contributor hereby grants You a world-wide, royalty-free,
    non-exclusive license:

    a. under intellectual property rights (other than patent or trademark)
       Licensable by such Contributor to use, reproduce, make available,
       modify, display, perform, distribute, and otherwise exploit its
       Contributions, either on an unmodified basis, with Modifications, or
       as part of a Larger Work; and

    b. under Patent Claims of such Contributor to make, use, sell, offer for
       sale, have made, import, and otherwise transfer either its
       Contributions or its Contributor Version.

2.2. Effective Date

    The licenses granted in Section 2.1 with respect to any Contribution
    become effective for each Contribution on the date the Contributor first
    distributes such Contribution.

2.3. Limitations on Grant Scope

    The licenses granted in this Section 2 are the only rights granted under
    this License. No additional rights or licenses will be implied from the
    distribution or licensing of Covered Software under this License.
    Notwithstanding Section 2.1(b) above, no patent license is granted by a
    Contributor:

    a. for any code that a Contributor has removed from Covered Software; or

    b. for infringements caused by: (i) Your and any other third party's
       modifications of Covered Software, or (ii) the combination of its
       Contributions with other software (except as part of its Contributor
       Version); or

    c. under Patent Claims infringed by Covered Software in the absence of
       its Contributions.

    This License does not grant any rights in the trademarks, service marks,
    or logos of any Contributor (except as may be necessary to comply with
    the notice requirements in Section 3.4).

2.4. Subsequent Licenses

    No Contributor makes additional grants as a result of Your choice to
    distribute the Covered Software under a subsequent version of this
    License (see Section 10.2) or under the terms of a Secondary License (if
    permitted under the terms of Section 3.3).

2.5. Representation

    Each Contributor represents that the Contributor believes its
    Contributions are its original creation(s) or it has sufficient rights to
    grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

    This License is not intended to limit any rights You have under
    applicable copyright doctrines of fair use, fair dealing, or other
    equivalents.

2.7. Conditions

    Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
    Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

    All distribution of Covered Software in Source Code Form, including any
    Modifications that You create or to which You contribute, must be under
    the terms of this License. You must inform recipients that the Source
    Code Form of the Covered Software is governed by the terms of this
    License, and how they can obtain a copy of this License. You may not
    attempt to alter or restrict the recipients' rights in the Source Code
    Form.

3.2. Distribution of Executable Form

    If You distribute Covered Software in Executable Form then:

    a. such Covered Software must also be made available in Source Code Form,
       as described in Section 3.1, and You must inform recipients of the
       Executable Form how they can obtain a copy of such Source Code Form by
       reasonable means in a timely manner, at a charge no more than the cost
       of distribution to the recipient; and

    b. You may distribute such Executable Form under the terms of this
       License, or sublicense it under different terms, provided that the
       license for the Executable Form does not attempt to limit or alter the
       recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

    You may create and distribute a Larger Work under terms of Your choice,
    provided that You also comply with the requirements of this License for
    the Covered Software. If the Larger Work is a combination of Covered
    Software with a work governed by one or more Secondary Licenses, and the
    Covered Software is not Incompatible With Secondary Licenses, this
    License permits You to additionally distribute such Covered Software
    under the terms of such Secondary License(s), so that the recipient of
    the Larger Work may, at their option, further distribute the Covered
    Software under the terms of either this License or such Secondary
    License(s).

3.4. Notices

    You may not remove or alter the substance of any license notices
    (including copyright notices, patent notices, disclaimers of warranty, or
    limitations of liability) contained within the Source Code Form of the
    Covered Software, except that You may alter any license notices to the
    extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

    You may choose to offer, and to charge a fee for, warranty, support,
    indemnity or liability obligations to one or more recipients of Covered
    Software. However, You may do so only on Your own behalf, and not on
    behalf of any Contributor. You must make it absolutely clear that any
    such warranty, support, indemnity, or liability obligation is offered by
    You alone, and You hereby agree to indemnify every Contributor for any
    liability incurred by such Contributor as a result of warranty, support,
    indemnity or liability terms You offer. You may include additional
    disclaimers of warranty and limitations of liability specific to any
    jurisdiction.

4. Inability to Comply Due to Statute or Regulation

    If it is impossible for You to comply with any of the terms of this License
    with respect to some or all of the Covered Software due to statute,
    judicial order, or regulation then You must: (a) comply with the terms of
    this License to the maximum extent possible; and (b) describe the
    limitations and the code they affect. Such description must be placed in a
    text file included with all distributions of the Covered Software under
    this License. Except to the extent prohibited by statute or regulation,
    such description must be sufficiently detailed for a recipient of ordinary
    skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
    fail to comply with any of its terms. However, if You become compliant,
    then the rights granted under this License from a particular Contributor
    are reinstated (a) provisionally, unless and until such Contributor
    explicitly and finally terminates Your grants, and (b) on an ongoing
    basis, if such Contributor fails to notify You of the non-compliance by
    some reasonable means prior to 60 days after You have come back into
    compliance. Moreover, Your grants from a particular Contributor are
    reinstated on an ongoing basis if such Contributor notifies You of the
    non-compliance by some reasonable means, this is the first time You have
    received notice of non-compliance with this License from such
    Contributor, and You become compliant prior to 30 days after Your receipt
    of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
    infringement claim (excluding declaratory judgment actions,
    counter-claims, and cross-claims) alleging that a Contributor Version
    directly or indirectly infringes any patent, then the rights granted to
    You by any and all Contributors for the Covered Software under Section
    2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
    license agreements (excluding distributors and resellers) which have been
    validly granted by You or Your distributors under this License prior to
    termination shall survive termination.

6. Disclaimer of Warranty

    Covered Software is provided under this License on an "as is" basis,
    without warranty of any kind, either expressed, implied, or statutory,
    including, without limitation, warranties that the Covered Software is free
    of defects, merchantable, fit for a particular purpose or non-infringing.
    The entire risk as to the quality and performance of the Covered Software
    is with You. Should any Covered Software prove defective in any respect,
    You (not any Contributor) assume the cost of any necessary servicing,
    repair, or correction. This disclaimer of warranty constitutes an essential
    part of this License. No use of any Covered Software is authorized under
    this License except under this disclaimer.

7. Limitation of Liability

    Under no circumstances and under no legal theory, whether tort (including
    negligence), contract, or otherwise, shall any Contributor, or anyone who
    distributes Covered Software as permitted above, be liable to You for any
    direct, indirect, special, incidental, or consequential damages of any
    character including, without limitation, damages for lost profits, loss of
    goodwill, work stoppage, computer failure or malfunction, or any and all
    other commercial damages or losses, even if such party shall have been
    informed of the possibility of such damages. This limitation of liability
    shall not apply to liability for death or personal injury resulting from
    such party's negligence to the extent applicable law prohibits such
    limitation. Some jurisdictions do not allow the exclusion or limitation of
    incidental or consequential damages, so this exclusion and limitation may
    not apply to You.

8. Litigation

    Any litigation relating to this License may be brought only in the courts
    of a jurisdiction where the defendant maintains its principal place of
    business and such litigation shall be governed by laws of that
    jurisdiction, without reference to its conflict-of-law provisions. Nothing
    in this Section shall prevent a party's ability to bring cross-claims or
    counter-claims.

9. Miscellaneous

    This License represents the complete agreement concerning the subject
    matter hereof. If any provision of this License is held to be
    unenforceable, such provision shall be reformed only to the extent
    necessary to make it enforceable. Any law or regulation which provides that
    the language of a contract shall be construed against the drafter shall not
    be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

    Mozilla Foundation is the license steward. Except as provided in Section
    10.3, no one other than the license steward has the right to modify or
    publish new versions of this License. Each version will be given a
    distinguishing version number.

10.2. Effect of New Versions

    You may distribute the Covered Software under the terms of the version
    of the License under which You originally received the Covered Software,
    or under the terms of any subsequent version published by the license
    steward.

10.3. Modified Versions

    If you create software not governed by this License, and you want to
    create a new license for such software, you may create and use a
    modified version of this License if you rename the license and remove
    any references to the name of the license steward (except to note that
    such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
    Licenses If You choose to distribute Source Code Form that is
    Incompatible With Secondary Licenses under the terms of this version of
    the License, the notice described in Exhibit B of this License must be
    attached.

Exhibit A - Source Code Form License Notice

    This Source Code Form is subject to the
    terms of the Mozilla Public License, v.
    2.0. If a copy of the MPL was not
    distributed with this file, You can
    obtain one at
    http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

    This Source Code Form is "Incompatible
    With Secondary Licenses", as defined by
    the Mozilla Public License, v. 2.0.
25  vendor/github.com/hashicorp/golang-lru/README.md  generated  vendored
@@ -1,25 +0,0 @@
golang-lru
==========

This provides the `lru` package which implements a fixed-size
thread safe LRU cache. It is based on the cache in Groupcache.

Documentation
=============

Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)

Example
=======

Using the LRU is very simple:

```go
l, _ := New(128)
for i := 0; i < 256; i++ {
    l.Add(i, nil)
}
if l.Len() != 128 {
    panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
257  vendor/github.com/hashicorp/golang-lru/arc.go  generated  vendored
@@ -1,257 +0,0 @@
package lru

import (
	"sync"

	"github.com/hashicorp/golang-lru/simplelru"
)

// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache, computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache struct {
	size int // Size is the total capacity of the cache
	p    int // P is the dynamic preference towards T1 or T2

	t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
	b1 simplelru.LRUCache // B1 is the LRU for evictions from t1

	t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
	b2 simplelru.LRUCache // B2 is the LRU for evictions from t2

	lock sync.RWMutex
}

// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
	// Create the sub LRUs
	b1, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	b2, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	t1, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	t2, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}

	// Initialize the ARC
	c := &ARCCache{
		size: size,
		p:    0,
		t1:   t1,
		b1:   b1,
		t2:   t2,
		b2:   b2,
	}
	return c, nil
}

// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If the value is contained in T1 (recent), then
	// promote it to T2 (frequent)
	if val, ok := c.t1.Peek(key); ok {
		c.t1.Remove(key)
		c.t2.Add(key, val)
		return val, ok
	}

	// Check if the value is contained in T2 (frequent)
	if val, ok := c.t2.Get(key); ok {
		return val, ok
	}

	// No hit
	return nil, false
}

// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Check if the value is contained in T1 (recent), and potentially
	// promote it to frequent T2
	if c.t1.Contains(key) {
		c.t1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Check if the value is already in T2 (frequent) and update it
	if c.t2.Contains(key) {
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// recently used list
	if c.b1.Contains(key) {
		// T1 set is too small, increase P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b2Len > b1Len {
			delta = b2Len / b1Len
		}
		if c.p+delta >= c.size {
			c.p = c.size
		} else {
			c.p += delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(false)
		}

		// Remove from B1
		c.b1.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// frequently used list
	if c.b2.Contains(key) {
		// T2 set is too small, decrease P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b1Len > b2Len {
			delta = b1Len / b2Len
		}
		if delta >= c.p {
			c.p = 0
		} else {
			c.p -= delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(true)
		}

		// Remove from B2
		c.b2.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Potentially need to make room in the cache
	if c.t1.Len()+c.t2.Len() >= c.size {
		c.replace(false)
	}

	// Keep the size of the ghost buffers trim
	if c.b1.Len() > c.size-c.p {
		c.b1.RemoveOldest()
	}
	if c.b2.Len() > c.p {
		c.b2.RemoveOldest()
	}

	// Add to the recently seen list
	c.t1.Add(key, value)
	return
}

// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
	t1Len := c.t1.Len()
	if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
		k, _, ok := c.t1.RemoveOldest()
		if ok {
			c.b1.Add(k, nil)
		}
	} else {
		k, _, ok := c.t2.RemoveOldest()
		if ok {
			c.b2.Add(k, nil)
		}
	}
}

// Len returns the number of cached entries
func (c *ARCCache) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Len() + c.t2.Len()
}

// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	k1 := c.t1.Keys()
	k2 := c.t2.Keys()
	return append(k1, k2...)
}

// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.t1.Remove(key) {
		return
	}
	if c.t2.Remove(key) {
		return
	}
	if c.b1.Remove(key) {
		return
	}
	if c.b2.Remove(key) {
		return
	}
}

// Purge is used to clear the cache
func (c *ARCCache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.t1.Purge()
	c.t2.Purge()
	c.b1.Purge()
	c.b2.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Contains(key) || c.t2.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if val, ok := c.t1.Peek(key); ok {
		return val, ok
	}
	return c.t2.Peek(key)
}
21  vendor/github.com/hashicorp/golang-lru/doc.go  generated  vendored
@@ -1,21 +0,0 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
150  vendor/github.com/hashicorp/golang-lru/lru.go  generated  vendored
@@ -1,150 +0,0 @@
package lru

import (
	"sync"

	"github.com/hashicorp/golang-lru/simplelru"
)

// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
	lru  simplelru.LRUCache
	lock sync.RWMutex
}

// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
	return NewWithEvict(size, nil)
}

// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
	lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
	if err != nil {
		return nil, err
	}
	c := &Cache{
		lru: lru,
	}
	return c, nil
}

// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
	c.lock.Lock()
	c.lru.Purge()
	c.lock.Unlock()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	c.lock.Unlock()
	return evicted
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	value, ok = c.lru.Get(key)
	c.lock.Unlock()
	return value, ok
}

// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
	c.lock.RLock()
	containKey := c.lru.Contains(key)
	c.lock.RUnlock()
	return containKey
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	value, ok = c.lru.Peek(key)
	c.lock.RUnlock()
	return value, ok
}

// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.lru.Contains(key) {
		return true, false
	}
	evicted = c.lru.Add(key, value)
	return false, evicted
}

// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	previous, ok = c.lru.Peek(key)
	if ok {
		return previous, true, false
	}

	evicted = c.lru.Add(key, value)
	return nil, false, evicted
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) (present bool) {
	c.lock.Lock()
	present = c.lru.Remove(key)
	c.lock.Unlock()
	return
}

// Resize changes the cache size.
func (c *Cache) Resize(size int) (evicted int) {
	c.lock.Lock()
	evicted = c.lru.Resize(size)
	c.lock.Unlock()
	return evicted
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
	c.lock.Lock()
	key, value, ok = c.lru.RemoveOldest()
	c.lock.Unlock()
	return
}

// GetOldest returns the oldest entry
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
	c.lock.Lock()
	key, value, ok = c.lru.GetOldest()
	c.lock.Unlock()
	return
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
	c.lock.RLock()
	keys := c.lru.Keys()
	c.lock.RUnlock()
	return keys
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
	c.lock.RLock()
	length := c.lru.Len()
	c.lock.RUnlock()
	return length
}
177  vendor/github.com/hashicorp/golang-lru/simplelru/lru.go  generated  vendored
@@ -1,177 +0,0 @@
package simplelru

import (
	"container/list"
	"errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
	size      int
	evictList *list.List
	items     map[interface{}]*list.Element
	onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
	key   interface{}
	value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
	if size <= 0 {
		return nil, errors.New("Must provide a positive size")
	}
	c := &LRU{
		size:      size,
		evictList: list.New(),
		items:     make(map[interface{}]*list.Element),
		onEvict:   onEvict,
	}
	return c, nil
}

// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
	for k, v := range c.items {
		if c.onEvict != nil {
			c.onEvict(k, v.Value.(*entry).value)
		}
		delete(c.items, k)
	}
	c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
	// Check for existing item
	if ent, ok := c.items[key]; ok {
		c.evictList.MoveToFront(ent)
		ent.Value.(*entry).value = value
		return false
	}

	// Add new item
	ent := &entry{key, value}
	entry := c.evictList.PushFront(ent)
	c.items[key] = entry

	evict := c.evictList.Len() > c.size
	// Verify size not exceeded
	if evict {
		c.removeOldest()
	}
	return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
	if ent, ok := c.items[key]; ok {
		c.evictList.MoveToFront(ent)
		if ent.Value.(*entry) == nil {
			return nil, false
		}
		return ent.Value.(*entry).value, true
	}
	return
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
	_, ok = c.items[key]
	return ok
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
	var ent *list.Element
	if ent, ok = c.items[key]; ok {
		return ent.Value.(*entry).value, true
	}
	return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
	if ent, ok := c.items[key]; ok {
		c.removeElement(ent)
		return true
	}
	return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
	ent := c.evictList.Back()
	if ent != nil {
		c.removeElement(ent)
		kv := ent.Value.(*entry)
		return kv.key, kv.value, true
	}
	return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
	ent := c.evictList.Back()
	if ent != nil {
		kv := ent.Value.(*entry)
		return kv.key, kv.value, true
	}
	return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
	keys := make([]interface{}, len(c.items))
	i := 0
	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
		keys[i] = ent.Value.(*entry).key
		i++
	}
	return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
	return c.evictList.Len()
}

// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
	diff := c.Len() - size
	if diff < 0 {
		diff = 0
	}
	for i := 0; i < diff; i++ {
		c.removeOldest()
	}
	c.size = size
	return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
	ent := c.evictList.Back()
	if ent != nil {
		c.removeElement(ent)
	}
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
	c.evictList.Remove(e)
	kv := e.Value.(*entry)
	delete(c.items, kv.key)
	if c.onEvict != nil {
		c.onEvict(kv.key, kv.value)
	}
}
39  vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go  generated  vendored
@@ -1,39 +0,0 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Checks if a key exists in cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clears all cache entries.
	Purge()

	// Resizes cache, returning number evicted
	Resize(int) int
}
2  vendor/github.com/hashicorp/vault/api/auth.go  generated  vendored
@@ -63,7 +63,7 @@ func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[s
 		return nil, fmt.Errorf("secret does not contain MFARequirements")
 	}
 
-	s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload)
+	s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload)
 	if err != nil {
 		return nil, err
 	}
49  vendor/github.com/hashicorp/vault/api/client.go  generated  vendored
@@ -24,12 +24,9 @@ import (
 	"github.com/hashicorp/go-retryablehttp"
 	"github.com/hashicorp/go-rootcerts"
 	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
 	"golang.org/x/net/http2"
 	"golang.org/x/time/rate"
-
-	"github.com/hashicorp/vault/sdk/helper/consts"
-	"github.com/hashicorp/vault/sdk/helper/strutil"
-	"github.com/hashicorp/vault/sdk/logical"
 )
 
 const (
@@ -56,7 +53,19 @@ const (
 	HeaderIndex        = "X-Vault-Index"
 	HeaderForward      = "X-Vault-Forward"
 	HeaderInconsistent = "X-Vault-Inconsistent"
-	TLSErrorString     = "This error usually means that the server is running with TLS disabled\n" +
+
+	// NamespaceHeaderName is the header set to specify which namespace the
+	// request is indented for.
+	NamespaceHeaderName = "X-Vault-Namespace"
+
+	// AuthHeaderName is the name of the header containing the token.
+	AuthHeaderName = "X-Vault-Token"
+
+	// RequestHeaderName is the name of the header used by the Agent for
+	// SSRF protection.
+	RequestHeaderName = "X-Vault-Request"
+
+	TLSErrorString = "This error usually means that the server is running with TLS disabled\n" +
 		"but the client is configured to use TLS. Please either enable TLS\n" +
 		"on the server or run the client with -address set to an address\n" +
 		"that uses the http protocol:\n\n" +
@@ -621,7 +630,7 @@ func NewClient(c *Config) (*Client, error) {
 	}
 
 	// Add the VaultRequest SSRF protection header
-	client.headers[consts.RequestHeaderName] = []string{"true"}
+	client.headers[RequestHeaderName] = []string{"true"}
 
 	if token := os.Getenv(EnvVaultToken); token != "" {
 		client.token = token
@@ -938,7 +947,7 @@ func (c *Client) setNamespace(namespace string) {
 		c.headers = make(http.Header)
 	}
 
-	c.headers.Set(consts.NamespaceHeaderName, namespace)
+	c.headers.Set(NamespaceHeaderName, namespace)
 }
 
 // ClearNamespace removes the namespace header if set.
@@ -946,7 +955,7 @@ func (c *Client) ClearNamespace() {
 	c.modifyLock.Lock()
 	defer c.modifyLock.Unlock()
 	if c.headers != nil {
-		c.headers.Del(consts.NamespaceHeaderName)
+		c.headers.Del(NamespaceHeaderName)
 	}
 }
 
@@ -958,7 +967,7 @@ func (c *Client) Namespace() string {
 	if c.headers == nil {
 		return ""
 	}
-	return c.headers.Get(consts.NamespaceHeaderName)
+	return c.headers.Get(NamespaceHeaderName)
 }
 
 // WithNamespace makes a shallow copy of Client, modifies it to use
@@ -1292,7 +1301,7 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon
 	checkRetry := c.config.CheckRetry
 	backoff := c.config.Backoff
 	httpClient := c.config.HttpClient
-	ns := c.headers.Get(consts.NamespaceHeaderName)
+	ns := c.headers.Get(NamespaceHeaderName)
 	outputCurlString := c.config.OutputCurlString
 	outputPolicy := c.config.OutputPolicy
 	logger := c.config.Logger
@@ -1305,9 +1314,9 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon
 	// e.g. calls using (*Client).WithNamespace
 	switch ns {
 	case "":
-		r.Headers.Del(consts.NamespaceHeaderName)
+		r.Headers.Del(NamespaceHeaderName)
 	default:
-		r.Headers.Set(consts.NamespaceHeaderName, ns)
+		r.Headers.Set(NamespaceHeaderName, ns)
 	}
 
 	for _, cb := range c.requestCallbacks {
@@ -1460,8 +1469,8 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo
 		}
 	}
 	// explicitly set the namespace header to current client
-	if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" {
-		r.Headers.Set(consts.NamespaceHeaderName, ns)
+	if ns := c.headers.Get(NamespaceHeaderName); ns != "" {
+		r.Headers.Set(NamespaceHeaderName, ns)
 	}
 	}
 
@@ -1482,7 +1491,7 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo
 	req.Host = r.URL.Host
 
 	if len(r.ClientToken) != 0 {
-		req.Header.Set(consts.AuthHeaderName, r.ClientToken)
+		req.Header.Set(AuthHeaderName, r.ClientToken)
 	}
 
 	if len(r.WrapTTL) != 0 {
@@ -1672,7 +1681,13 @@ func MergeReplicationStates(old []string, new string) []string {
 	return strutil.RemoveDuplicates(ret, false)
 }
 
-func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) {
+type WALState struct {
+	ClusterID       string
+	LocalIndex      uint64
+	ReplicatedIndex uint64
+}
+
+func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) {
 	cooked, err := base64.StdEncoding.DecodeString(raw)
 	if err != nil {
 		return nil, err
@@ -1710,7 +1725,7 @@ func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error
 		return nil, fmt.Errorf("invalid replicated index in state header: %w", err)
 	}
 
-	return &logical.WALState{
+	return &WALState{
 		ClusterID:       pieces[1],
 		LocalIndex:      localIndex,
 		ReplicatedIndex: replicatedIndex,
4  vendor/github.com/hashicorp/vault/api/lifetime_watcher.go  generated  vendored
@@ -366,10 +366,12 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool,
 			return nil
 		}
 
+		timer := time.NewTimer(sleepDuration)
 		select {
 		case <-r.stopCh:
+			timer.Stop()
 			return nil
-		case <-time.After(sleepDuration):
+		case <-timer.C:
 			continue
 		}
 	}
6  vendor/github.com/hashicorp/vault/api/logical.go  generated  vendored
@@ -3,6 +3,7 @@ package api
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -11,7 +12,6 @@ import (
 	"strings"
 
 	"github.com/hashicorp/errwrap"
-	"github.com/hashicorp/vault/sdk/helper/jsonutil"
 )
 
 const (
@@ -390,7 +390,9 @@ func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) (
 
 	wrappedSecret := new(Secret)
 	buf := bytes.NewBufferString(secret.Data["response"].(string))
-	if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil {
+	dec := json.NewDecoder(buf)
+	dec.UseNumber()
+	if err := dec.Decode(wrappedSecret); err != nil {
 		return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err)
 	}
 
vendor/github.com/hashicorp/vault/api/plugin_types.go  generated  vendored
@@ -1,4 +1,8 @@
-package consts
+package api
 
+// NOTE: this file was copied from
+// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go
+// Any changes made should be made to both files at the same time.
+
 import "fmt"
 
4  vendor/github.com/hashicorp/vault/api/request.go  generated  vendored
@@ -8,8 +8,6 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/hashicorp/vault/sdk/helper/consts"
-
 	retryablehttp "github.com/hashicorp/go-retryablehttp"
 )
 
@@ -127,7 +125,7 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
 	}
 
 	if len(r.ClientToken) != 0 {
-		req.Header.Set(consts.AuthHeaderName, r.ClientToken)
+		req.Header.Set(AuthHeaderName, r.ClientToken)
 	}
 
 	if len(r.WrapTTL) != 0 {
14  vendor/github.com/hashicorp/vault/api/response.go  generated  vendored
@@ -2,13 +2,11 @@ package api
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
-
-	"github.com/hashicorp/vault/sdk/helper/consts"
-	"github.com/hashicorp/vault/sdk/helper/jsonutil"
 )
 
 // Response is a raw response that wraps an HTTP response.
@@ -20,7 +18,9 @@ type Response struct {
 // will consume the response body, but will not close it. Close must
 // still be called.
 func (r *Response) DecodeJSON(out interface{}) error {
-	return jsonutil.DecodeJSONFromReader(r.Body, out)
+	dec := json.NewDecoder(r.Body)
+	dec.UseNumber()
+	return dec.Decode(out)
 }
 
 // Error returns an error response if there is one. If there is an error,
@@ -42,7 +42,7 @@ func (r *Response) Error() error {
 
 	r.Body.Close()
 	r.Body = ioutil.NopCloser(bodyBuf)
-	ns := r.Header.Get(consts.NamespaceHeaderName)
+	ns := r.Header.Get(NamespaceHeaderName)
 
 	// Build up the error object
 	respErr := &ResponseError{
@@ -56,7 +56,9 @@ func (r *Response) Error() error {
 	// in a bytes.Reader here so that the JSON decoder doesn't move the
 	// read pointer for the original buffer.
 	var resp ErrorResponse
-	if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil {
+	dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes()))
+	dec.UseNumber()
+	if err := dec.Decode(&resp); err != nil {
 		// Store the fact that we couldn't decode the errors
 		respErr.RawError = true
 		respErr.Errors = []string{bodyBuf.String()}
28  vendor/github.com/hashicorp/vault/api/secret.go  generated  vendored
@@ -11,8 +11,6 @@ import (
 
 	"github.com/hashicorp/errwrap"
 	"github.com/hashicorp/go-secure-stdlib/parseutil"
-	"github.com/hashicorp/vault/sdk/helper/jsonutil"
-	"github.com/hashicorp/vault/sdk/logical"
 )
 
 // Secret is the structure returned for every secret within Vault.
@@ -283,6 +281,22 @@ type SecretWrapInfo struct {
 	WrappedAccessor string `json:"wrapped_accessor"`
 }
 
+type MFAMethodID struct {
+	Type         string `json:"type,omitempty"`
+	ID           string `json:"id,omitempty"`
+	UsesPasscode bool   `json:"uses_passcode,omitempty"`
+	Name         string `json:"name,omitempty"`
+}
+
+type MFAConstraintAny struct {
+	Any []*MFAMethodID `json:"any,omitempty"`
+}
+
+type MFARequirement struct {
+	MFARequestID   string                       `json:"mfa_request_id,omitempty"`
+	MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"`
+}
+
 // SecretAuth is the structure containing auth information if we have it.
 type SecretAuth struct {
 	ClientToken string `json:"client_token"`
@@ -297,7 +311,7 @@ type SecretAuth struct {
 	LeaseDuration int  `json:"lease_duration"`
 	Renewable     bool `json:"renewable"`
 
-	MFARequirement *logical.MFARequirement `json:"mfa_requirement"`
+	MFARequirement *MFARequirement `json:"mfa_requirement"`
 }
 
 // ParseSecret is used to parse a secret value from JSON from an io.Reader.
@@ -323,14 +337,18 @@ func ParseSecret(r io.Reader) (*Secret, error) {
 
 	// First decode the JSON into a map[string]interface{}
 	var secret Secret
-	if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil {
+	dec := json.NewDecoder(&buf)
+	dec.UseNumber()
+	if err := dec.Decode(&secret); err != nil {
 		return nil, err
 	}
 
 	// If the secret is null, add raw data to secret data if present
 	if reflect.DeepEqual(secret, Secret{}) {
 		data := make(map[string]interface{})
-		if err := jsonutil.DecodeJSONFromReader(&teebuf, &data); err != nil {
+		dec := json.NewDecoder(&teebuf)
+		dec.UseNumber()
+		if err := dec.Decode(&data); err != nil {
 			return nil, err
 		}
 		errRaw, errPresent := data["errors"]
vendor/github.com/hashicorp/vault/api/ssh_agent.go (generated, vendored, 30 lines changed)
@@ -15,7 +15,6 @@ import (
 	rootcerts "github.com/hashicorp/go-rootcerts"
 	"github.com/hashicorp/hcl"
 	"github.com/hashicorp/hcl/hcl/ast"
-	"github.com/hashicorp/vault/sdk/helper/hclutil"
 	"github.com/mitchellh/mapstructure"
 )

@@ -169,7 +168,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
 		"tls_skip_verify",
 		"tls_server_name",
 	}
-	if err := hclutil.CheckHCLKeys(list, valid); err != nil {
+	if err := CheckHCLKeys(list, valid); err != nil {
 		return nil, multierror.Prefix(err, "ssh_helper:")
 	}

@@ -185,6 +184,33 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
 	return &c, nil
 }

+func CheckHCLKeys(node ast.Node, valid []string) error {
+	var list *ast.ObjectList
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		list = n
+	case *ast.ObjectType:
+		list = n.List
+	default:
+		return fmt.Errorf("cannot check HCL keys of type %T", n)
+	}
+
+	validMap := make(map[string]struct{}, len(valid))
+	for _, v := range valid {
+		validMap[v] = struct{}{}
+	}
+
+	var result error
+	for _, item := range list.Items {
+		key := item.Keys[0].Token.Value().(string)
+		if _, ok := validMap[key]; !ok {
+			result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
+		}
+	}
+
+	return result
+}
+
 // SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend
 // mounted at default path ("ssh").
 func (c *Client) SSHHelper() *SSHHelper {
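The ssh_agent.go hunks above inline the key validation that previously came from sdk/helper/hclutil, exporting it as CheckHCLKeys on the api package. A rough usage sketch of validating an ssh-helper style config against an allow-list; the config snippet and key list below are illustrative only, not part of this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/vault/api"
)

func main() {
	// A hypothetical config with one misspelled key.
	const config = `
vault_addr = "https://vault.example.com:8200"
tls_skip_verfy = true
`
	root, err := hcl.Parse(config)
	if err != nil {
		panic(err)
	}

	list, ok := root.Node.(*ast.ObjectList)
	if !ok {
		panic("config does not contain a root object list")
	}

	// CheckHCLKeys returns a multierror listing every key not in the allow-list.
	valid := []string{"vault_addr", "ssh_mount_point", "ca_cert", "tls_skip_verify"}
	if err := api.CheckHCLKeys(list, valid); err != nil {
		fmt.Println(err) // reports "tls_skip_verfy" as an invalid key
	}
}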
vendor/github.com/hashicorp/vault/api/sys_monitor.go (generated, vendored, 4 lines changed)
@@ -5,8 +5,6 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-
-	"github.com/hashicorp/vault/sdk/helper/logging"
 )

 // Monitor returns a channel that outputs strings containing the log messages
@@ -20,7 +18,7 @@ func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (c
 		r.Params.Add("log_level", logLevel)
 	}

-	if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() {
+	if logFormat == "" {
 		r.Params.Add("log_format", "standard")
 	} else {
 		r.Params.Add("log_format", logFormat)
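The sys_monitor.go change only drops the sdk logging helper; an empty log format is still sent as "standard". A rough sketch of consuming the monitor stream with the bumped client; the address and token come from the usual environment via DefaultConfig, and the chan string return type is assumed from the surrounding code since the hunk header is truncated here:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// An empty logFormat is sent as "standard" by the client after this change.
	logCh, err := client.Sys().Monitor(ctx, "info", "")
	if err != nil {
		panic(err)
	}

	// Stream server log lines until the context expires and the channel closes.
	for line := range logCh {
		fmt.Print(line)
	}
}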
vendor/github.com/hashicorp/vault/api/sys_plugins.go (generated, vendored, 25 lines changed)
@@ -7,20 +7,19 @@ import (
 	"net/http"
 	"time"

-	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/mitchellh/mapstructure"
 )

 // ListPluginsInput is used as input to the ListPlugins function.
 type ListPluginsInput struct {
 	// Type of the plugin. Required.
-	Type consts.PluginType `json:"type"`
+	Type PluginType `json:"type"`
 }

 // ListPluginsResponse is the response from the ListPlugins call.
 type ListPluginsResponse struct {
 	// PluginsByType is the list of plugins by type.
-	PluginsByType map[consts.PluginType][]string `json:"types"`
+	PluginsByType map[PluginType][]string `json:"types"`

 	Details []PluginDetails `json:"details,omitempty"`

@@ -68,11 +67,11 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
 	}

 	result := &ListPluginsResponse{
-		PluginsByType: make(map[consts.PluginType][]string),
+		PluginsByType: make(map[PluginType][]string),
 	}
 	switch i.Type {
-	case consts.PluginTypeUnknown:
-		for _, pluginType := range consts.PluginTypes {
+	case PluginTypeUnknown:
+		for _, pluginType := range PluginTypes {
 			pluginsRaw, ok := secret.Data[pluginType.String()]
 			if !ok {
 				continue
@@ -113,7 +112,7 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
 	}

 	switch i.Type {
-	case consts.PluginTypeUnknown:
+	case PluginTypeUnknown:
 		result.Details = details
 	default:
 		// Filter for just the queried type.
@@ -133,8 +132,8 @@ type GetPluginInput struct {
 	Name string `json:"-"`

 	// Type of the plugin. Required.
-	Type    consts.PluginType `json:"type"`
-	Version string            `json:"version"`
+	Type    PluginType `json:"type"`
+	Version string     `json:"version"`
 }

 // GetPluginResponse is the response from the GetPlugin call.
@@ -186,7 +185,7 @@ type RegisterPluginInput struct {
 	Name string `json:"-"`

 	// Type of the plugin. Required.
-	Type consts.PluginType `json:"type"`
+	Type PluginType `json:"type"`

 	// Args is the list of args to spawn the process with.
 	Args []string `json:"args,omitempty"`
@@ -231,7 +230,7 @@ type DeregisterPluginInput struct {
 	Name string `json:"-"`

 	// Type of the plugin. Required.
-	Type consts.PluginType `json:"type"`
+	Type PluginType `json:"type"`

 	// Version of the plugin. Optional.
 	Version string `json:"version,omitempty"`
@@ -368,11 +367,11 @@ func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInp
 }

 // catalogPathByType is a helper to construct the proper API path by plugin type
-func catalogPathByType(pluginType consts.PluginType, name string) string {
+func catalogPathByType(pluginType PluginType, name string) string {
 	path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name)

 	// Backwards compat, if type is not provided then use old path
-	if pluginType == consts.PluginTypeUnknown {
+	if pluginType == PluginTypeUnknown {
 		path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name)
 	}

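With the consts import gone, the plugin type enum (PluginType, PluginTypeUnknown, PluginTypes, and friends) is provided by the api package itself in 1.9.0. A rough sketch of listing secrets plugins through the new types; PluginTypeSecrets and the client setup are assumptions for illustration and are not shown in this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// PluginTypeSecrets belongs to the same enum that replaced consts.PluginType;
	// passing PluginTypeUnknown would list every catalog section instead.
	resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{
		Type: api.PluginTypeSecrets,
	})
	if err != nil {
		panic(err)
	}

	for _, name := range resp.PluginsByType[api.PluginTypeSecrets] {
		fmt.Println(name)
	}
}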
vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go (generated, vendored, 1386 lines changed)
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff