mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-13 18:43:34 +00:00

rebase: bump github.com/hashicorp/vault/api from 1.1.1 to 1.2.0

Bumps [github.com/hashicorp/vault/api](https://github.com/hashicorp/vault) from 1.1.1 to 1.2.0.
- [Release notes](https://github.com/hashicorp/vault/releases)
- [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/vault/compare/v1.1.1...v1.2.0)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/vault/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

commit 5280b67327 (parent 9bd9f5e91d), committed by mergify[bot]
vendor/github.com/hashicorp/vault/api/README.md (generated, vendored): 2 changed lines
@@ -3,4 +3,6 @@ Vault API

This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server.

For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples/tree/main/go) repo.

[](https://godoc.org/github.com/hashicorp/vault/api)
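As a quick illustration of the package this README describes, here is a minimal, hedged sketch of connecting to a Vault server and reading a secret with `github.com/hashicorp/vault/api`. The server address, token, and secret path are placeholders, not values from this commit.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Start from the library defaults and point the client at a server.
	// The address and token below are illustrative placeholders.
	config := vault.DefaultConfig()
	config.Address = "https://vault.example.com:8200"

	client, err := vault.NewClient(config)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	client.SetToken("s.exampletoken")

	// Read a secret; the path is hypothetical.
	secret, err := client.Logical().Read("secret/data/demo")
	if err != nil {
		log.Fatalf("reading secret: %v", err)
	}
	fmt.Println(secret.Data)
}
```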
vendor/github.com/hashicorp/vault/api/client.go (generated, vendored): 272 changed lines
@@ -2,7 +2,11 @@ package api

import (
    "context"
    "crypto/hmac"
    "crypto/sha256"
    "crypto/tls"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "net"
    "net/http"
@@ -19,10 +23,13 @@ import (
    cleanhttp "github.com/hashicorp/go-cleanhttp"
    retryablehttp "github.com/hashicorp/go-retryablehttp"
    rootcerts "github.com/hashicorp/go-rootcerts"
    "github.com/hashicorp/vault/sdk/helper/consts"
    "github.com/hashicorp/vault/sdk/helper/parseutil"
    "github.com/hashicorp/go-secure-stdlib/parseutil"
    "golang.org/x/net/http2"
    "golang.org/x/time/rate"

    "github.com/hashicorp/vault/sdk/helper/consts"
    "github.com/hashicorp/vault/sdk/helper/strutil"
    "github.com/hashicorp/vault/sdk/logical"
)

const (
@@ -42,6 +49,8 @@ const (
    EnvVaultToken = "VAULT_TOKEN"
    EnvVaultMFA = "VAULT_MFA"
    EnvRateLimit = "VAULT_RATE_LIMIT"
    EnvHTTPProxy = "VAULT_HTTP_PROXY"
    HeaderIndex = "X-Vault-Index"
)

// Deprecated values
@@ -125,6 +134,19 @@ type Config struct {

    // SRVLookup enables the client to lookup the host through DNS SRV lookup
    SRVLookup bool

    // CloneHeaders ensures that the source client's headers are copied to
    // its clone.
    CloneHeaders bool

    // ReadYourWrites ensures isolated read-after-write semantics by
    // providing discovered cluster replication states in each request.
    // The shared state is automatically propagated to all Client clones.
    //
    // Note: Careful consideration should be made prior to enabling this setting
    // since there will be a performance penalty paid upon each request.
    // This feature requires Enterprise server-side.
    ReadYourWrites bool
}

// TLSConfig contains the parameters needed to configure TLS on the HTTP client
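The new `CloneHeaders` and `ReadYourWrites` fields can be set directly on the `Config` before building a client. A minimal sketch, assuming a reachable Vault server and keeping in mind the field comment above that read-your-writes relies on Enterprise replication on the server side:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	config := vault.DefaultConfig()
	// Copy custom headers to any clone of this client.
	config.CloneHeaders = true
	// Track X-Vault-Index replication states on every request/response.
	// Per the field comment, this adds per-request overhead and the
	// server-side behaviour requires Vault Enterprise replication.
	config.ReadYourWrites = true

	client, err := vault.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```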
@@ -200,7 +222,7 @@ func DefaultConfig() *Config {
    return config
}

// ConfigureTLS takes a set of TLS configurations and applies those to the the
// ConfigureTLS takes a set of TLS configurations and applies those to the
// HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
    if c.HttpClient == nil {
@@ -268,6 +290,7 @@ func (c *Config) ReadEnvironment() error {
    var envMaxRetries *uint64
    var envSRVLookup bool
    var limit *rate.Limiter
    var envHTTPProxy string

    // Parse the environment variables
    if v := os.Getenv(EnvVaultAddress); v != "" {
@@ -336,6 +359,10 @@ func (c *Config) ReadEnvironment() error {
        envTLSServerName = v
    }

    if v := os.Getenv(EnvHTTPProxy); v != "" {
        envHTTPProxy = v
    }

    // Configure the HTTP clients TLS configuration.
    t := &TLSConfig{
        CACert: envCACert,
@@ -372,6 +399,16 @@ func (c *Config) ReadEnvironment() error {
        c.Timeout = envClientTimeout
    }

    if envHTTPProxy != "" {
        url, err := url.Parse(envHTTPProxy)
        if err != nil {
            return err
        }

        transport := c.HttpClient.Transport.(*http.Transport)
        transport.Proxy = http.ProxyURL(url)
    }

    return nil
}
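With this change, `Config.ReadEnvironment` honours a `VAULT_HTTP_PROXY` variable and routes client traffic through the given proxy. A hedged sketch of how a caller might pick that up; the proxy URL is a placeholder, and normally the variable would be set in the environment rather than by the program itself:

```go
package main

import (
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Illustrative only: set the proxy the same way an operator would.
	os.Setenv("VAULT_HTTP_PROXY", "http://proxy.internal:3128")

	// DefaultConfig already reads the environment; calling ReadEnvironment
	// again here just makes the step explicit for the example.
	config := vault.DefaultConfig()
	if err := config.ReadEnvironment(); err != nil {
		log.Fatalf("reading environment: %v", err)
	}

	client, err := vault.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // requests now go through the configured proxy
}
```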
@@ -390,16 +427,17 @@ func parseRateLimit(val string) (rate float64, burst int, err error) {

// Client is the client to the Vault API. Create a client with NewClient.
type Client struct {
    modifyLock sync.RWMutex
    addr *url.URL
    config *Config
    token string
    headers http.Header
    wrappingLookupFunc WrappingLookupFunc
    mfaCreds []string
    policyOverride bool
    requestCallbacks []RequestCallback
    responseCallbacks []ResponseCallback
    modifyLock sync.RWMutex
    addr *url.URL
    config *Config
    token string
    headers http.Header
    wrappingLookupFunc WrappingLookupFunc
    mfaCreds []string
    policyOverride bool
    requestCallbacks []RequestCallback
    responseCallbacks []ResponseCallback
    replicationStateStore *replicationStateStore
}

// NewClient returns a new client for the given configuration.
@@ -473,6 +511,10 @@ func NewClient(c *Config) (*Client, error) {
        headers: make(http.Header),
    }

    if c.ReadYourWrites {
        client.replicationStateStore = &replicationStateStore{}
    }

    // Add the VaultRequest SSRF protection header
    client.headers[consts.RequestHeaderName] = []string{"true"}

@@ -504,6 +546,8 @@ func (c *Client) CloneConfig() *Config {
    newConfig.Limiter = c.config.Limiter
    newConfig.OutputCurlString = c.config.OutputCurlString
    newConfig.SRVLookup = c.config.SRVLookup
    newConfig.CloneHeaders = c.config.CloneHeaders
    newConfig.ReadYourWrites = c.config.ReadYourWrites

    // we specifically want a _copy_ of the client here, not a pointer to the original one
    newClient := *c.config.HttpClient
@@ -809,6 +853,52 @@ func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) {
    c.config.Logger = logger
}

// SetCloneHeaders to allow headers to be copied whenever the client is cloned.
func (c *Client) SetCloneHeaders(cloneHeaders bool) {
    c.modifyLock.Lock()
    defer c.modifyLock.Unlock()
    c.config.modifyLock.Lock()
    defer c.config.modifyLock.Unlock()

    c.config.CloneHeaders = cloneHeaders
}

// CloneHeaders gets the configured CloneHeaders value.
func (c *Client) CloneHeaders() bool {
    c.modifyLock.RLock()
    defer c.modifyLock.RUnlock()
    c.config.modifyLock.RLock()
    defer c.config.modifyLock.RUnlock()

    return c.config.CloneHeaders
}

// SetReadYourWrites to prevent reading stale cluster replication state.
func (c *Client) SetReadYourWrites(preventStaleReads bool) {
    c.modifyLock.Lock()
    defer c.modifyLock.Unlock()
    c.config.modifyLock.Lock()
    defer c.config.modifyLock.Unlock()

    if preventStaleReads && c.replicationStateStore == nil {
        c.replicationStateStore = &replicationStateStore{}
    } else {
        c.replicationStateStore = nil
    }

    c.config.ReadYourWrites = preventStaleReads
}

// ReadYourWrites gets the configured value of ReadYourWrites
func (c *Client) ReadYourWrites() bool {
    c.modifyLock.RLock()
    defer c.modifyLock.RUnlock()
    c.config.modifyLock.RLock()
    defer c.config.modifyLock.RUnlock()

    return c.config.ReadYourWrites
}
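Both options can also be flipped on an already-built client through the setters added in this hunk. A short hedged sketch; the client is constructed from the library defaults purely for illustration:

```go
package main

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

// configureClient flips the new options on an existing client.
func configureClient(client *vault.Client) {
	// Copy this client's headers into any future Clone().
	client.SetCloneHeaders(true)

	// Track replication states for read-after-write semantics; passing
	// false later drops the tracked state again.
	client.SetReadYourWrites(true)

	// The matching getters report the current configuration.
	fmt.Println(client.CloneHeaders(), client.ReadYourWrites())
}

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		panic(err)
	}
	configureClient(client)
}
```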
// Clone creates a new client with the same configuration. Note that the same
// underlying http.Client is used; modifying the client from more than one
// goroutine at once may not be safe, so modify the client as needed and then
@@ -839,12 +929,20 @@ func (c *Client) Clone() (*Client, error) {
        OutputCurlString: config.OutputCurlString,
        AgentAddress: config.AgentAddress,
        SRVLookup: config.SRVLookup,
        CloneHeaders: config.CloneHeaders,
        ReadYourWrites: config.ReadYourWrites,
    }
    client, err := NewClient(newConfig)
    if err != nil {
        return nil, err
    }

    if config.CloneHeaders {
        client.SetHeaders(c.Headers().Clone())
    }

    client.replicationStateStore = c.replicationStateStore

    return client, nil
}
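With `CloneHeaders` enabled, `Clone` now carries the parent client's headers (and the shared replication state store) over to the copy. A minimal sketch; the header name and value are examples only:

```go
package main

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	parent, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Set an example header on the parent and opt in to header cloning.
	h := parent.Headers()
	h.Set("X-Example-Team", "storage")
	parent.SetHeaders(h)
	parent.SetCloneHeaders(true)

	child, err := parent.Clone()
	if err != nil {
		panic(err)
	}

	// The clone inherits the parent's headers because CloneHeaders is set.
	fmt.Println(child.Headers().Get("X-Example-Team"))
}
```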
@@ -950,6 +1048,10 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon
        cb(r)
    }

    if c.config.ReadYourWrites {
        c.replicationStateStore.requireState(r)
    }

    if limiter != nil {
        limiter.Wait(ctx)
    }
@@ -1060,6 +1162,10 @@ START:
        for _, cb := range c.responseCallbacks {
            cb(result)
        }

        if c.config.ReadYourWrites {
            c.replicationStateStore.recordState(result)
        }
    }
    if err := result.Error(); err != nil {
        return result, err
@@ -1101,7 +1207,7 @@ func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client {
// by Vault in a response header.
func RecordState(state *string) ResponseCallback {
    return func(resp *Response) {
        *state = resp.Header.Get("X-Vault-Index")
        *state = resp.Header.Get(HeaderIndex)
    }
}

@@ -1111,11 +1217,111 @@ func RecordState(state *string) ResponseCallback {
func RequireState(states ...string) RequestCallback {
    return func(req *Request) {
        for _, s := range states {
            req.Headers.Add("X-Vault-Index", s)
            req.Headers.Add(HeaderIndex, s)
        }
    }
}
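`RecordState` and `RequireState` now use the exported `HeaderIndex` constant instead of the literal header name. They remain the manual building blocks for pinning consistency across requests; the new `ReadYourWrites` option automates the same flow. A hedged sketch, assuming the `WithRequestCallbacks` helper that pairs with the `WithResponseCallbacks` method shown in the hunk header, and with the secret path as a placeholder:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Capture the X-Vault-Index state that Vault returns for a write.
	var state string
	writer := client.WithResponseCallbacks(vault.RecordState(&state))
	_, err = writer.Logical().Write("secret/data/demo", map[string]interface{}{
		"data": map[string]interface{}{"k": "v"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Require at least that state on a follow-up read, so a node that has
	// not yet seen the write can react accordingly.
	reader := client.WithRequestCallbacks(vault.RequireState(state))
	secret, err := reader.Logical().Read("secret/data/demo")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(secret)
}
```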
// compareReplicationStates returns 1 if s1 is newer or identical, -1 if s1 is older, and 0
// if neither s1 or s2 is strictly greater. An error is returned if s1 or s2
// are invalid or from different clusters.
func compareReplicationStates(s1, s2 string) (int, error) {
    w1, err := ParseReplicationState(s1, nil)
    if err != nil {
        return 0, err
    }
    w2, err := ParseReplicationState(s2, nil)
    if err != nil {
        return 0, err
    }

    if w1.ClusterID != w2.ClusterID {
        return 0, fmt.Errorf("can't compare replication states with different ClusterIDs")
    }

    switch {
    case w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex:
        return 1, nil
    // We've already handled the case where both are equal above, so really we're
    // asking here if one or both are lesser.
    case w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex:
        return -1, nil
    }

    return 0, nil
}

// MergeReplicationStates returns a merged array of replication states by iterating
// through all states in `old`. An iterated state is merged to the result before `new`
// based on the result of compareReplicationStates
func MergeReplicationStates(old []string, new string) []string {
    if len(old) == 0 || len(old) > 2 {
        return []string{new}
    }

    var ret []string
    for _, o := range old {
        c, err := compareReplicationStates(o, new)
        if err != nil {
            return []string{new}
        }
        switch c {
        case 1:
            ret = append(ret, o)
        case -1:
            ret = append(ret, new)
        case 0:
            ret = append(ret, o, new)
        }
    }
    return strutil.RemoveDuplicates(ret, false)
}

func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) {
    cooked, err := base64.StdEncoding.DecodeString(raw)
    if err != nil {
        return nil, err
    }
    s := string(cooked)

    lastIndex := strings.LastIndexByte(s, ':')
    if lastIndex == -1 {
        return nil, fmt.Errorf("invalid full state header format")
    }
    state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:]
    stateHMAC, err := hex.DecodeString(stateHMACRaw)
    if err != nil {
        return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err)
    }

    if len(hmacKey) != 0 {
        hm := hmac.New(sha256.New, hmacKey)
        hm.Write([]byte(state))
        if !hmac.Equal(hm.Sum(nil), stateHMAC) {
            return nil, fmt.Errorf("invalid state header HMAC (mismatch)")
        }
    }

    pieces := strings.Split(state, ":")
    if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" {
        return nil, fmt.Errorf("invalid state header format")
    }
    localIndex, err := strconv.ParseUint(pieces[2], 10, 64)
    if err != nil {
        return nil, fmt.Errorf("invalid local index in state header: %w", err)
    }
    replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64)
    if err != nil {
        return nil, fmt.Errorf("invalid replicated index in state header: %w", err)
    }

    return &logical.WALState{
        ClusterID: pieces[1],
        LocalIndex: localIndex,
        ReplicatedIndex: replicatedIndex,
    }, nil
}
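`ParseReplicationState` and `MergeReplicationStates` are exported, so callers can inspect or merge the otherwise opaque `X-Vault-Index` values themselves. A hedged sketch; the two states below are synthetic values built only to show the decoded format (`v1:<cluster>:<local>:<replicated>:<hmac>`, base64-encoded), not real server output:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// fakeState builds a synthetic replication state for illustration only:
// version, cluster ID, local index, replicated index, empty HMAC.
func fakeState(local, replicated uint64) string {
	s := fmt.Sprintf("v1:cluster-demo:%d:%d:", local, replicated)
	return base64.StdEncoding.EncodeToString([]byte(s))
}

func main() {
	older := fakeState(100, 100)
	newer := fakeState(101, 101)

	// ParseReplicationState exposes the decoded WAL indexes; a nil key
	// skips the HMAC check.
	w, err := vault.ParseReplicationState(newer, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(w.ClusterID, w.LocalIndex, w.ReplicatedIndex)

	// MergeReplicationStates keeps only the states that still matter.
	merged := vault.MergeReplicationStates([]string{older}, newer)
	fmt.Println(merged) // the newer state supersedes the older one
}
```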
// ForwardInconsistent returns a request callback that will add a request
// header which says: if the state required isn't present on the node receiving
// this request, forward it to the active node. This should be used in
@@ -1149,3 +1355,39 @@ func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bo
    }
    return false, nil
}

// replicationStateStore is used to track cluster replication states
// in order to ensure proper read-after-write semantics for a Client.
type replicationStateStore struct {
    m sync.RWMutex
    store []string
}

// recordState updates the store's replication states with the merger of all
// states.
func (w *replicationStateStore) recordState(resp *Response) {
    w.m.Lock()
    defer w.m.Unlock()
    newState := resp.Header.Get(HeaderIndex)
    if newState != "" {
        w.store = MergeReplicationStates(w.store, newState)
    }
}

// requireState updates the Request with the store's current replication states.
func (w *replicationStateStore) requireState(req *Request) {
    w.m.RLock()
    defer w.m.RUnlock()
    for _, s := range w.store {
        req.Headers.Add(HeaderIndex, s)
    }
}

// states currently stored.
func (w *replicationStateStore) states() []string {
    w.m.RLock()
    defer w.m.RUnlock()
    c := make([]string, len(w.store))
    copy(c, w.store)
    return c
}
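The store above is what makes `ReadYourWrites` transparent: each response's `X-Vault-Index` header is merged into the shared state, and every subsequent request (including from clones) re-sends it. A hedged end-to-end sketch, with server address, token, and secret path as placeholders:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	config := vault.DefaultConfig()
	config.ReadYourWrites = true // requires Enterprise replication server-side

	client, err := vault.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("s.exampletoken") // placeholder

	// The write's X-Vault-Index response header is recorded internally...
	_, err = client.Logical().Write("secret/data/demo", map[string]interface{}{
		"data": map[string]interface{}{"owner": "ceph-csi"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// ...and automatically re-sent here, so the read reflects the write
	// even if it lands on a node that lags behind.
	secret, err := client.Logical().Read("secret/data/demo")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(secret.Data)
}
```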
vendor/github.com/hashicorp/vault/api/go.mod (generated, vendored): 12 changed lines
@@ -6,17 +6,19 @@ replace github.com/hashicorp/vault/sdk => ../sdk

require (
    github.com/cenkalti/backoff/v3 v3.0.0
    github.com/frankban/quicktest v1.13.0 // indirect
    github.com/go-test/deep v1.0.2
    github.com/hashicorp/errwrap v1.0.0
    github.com/hashicorp/errwrap v1.1.0
    github.com/hashicorp/go-cleanhttp v0.5.1
    github.com/hashicorp/go-hclog v0.16.1
    github.com/hashicorp/go-multierror v1.1.0
    github.com/hashicorp/go-hclog v0.16.2
    github.com/hashicorp/go-multierror v1.1.1
    github.com/hashicorp/go-retryablehttp v0.6.6
    github.com/hashicorp/go-rootcerts v1.0.2
    github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1
    github.com/hashicorp/hcl v1.0.0
    github.com/hashicorp/vault/sdk v0.2.1
    github.com/mitchellh/mapstructure v1.3.2
    golang.org/x/net v0.0.0-20200602114024-627f9648deb9
    github.com/mitchellh/mapstructure v1.4.2
    golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
    golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
    gopkg.in/square/go-jose.v2 v2.5.1
)
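For downstream code such as ceph-csi's vendored tree, the most visible consequence of this bump is the dependency switch shown above: `parseutil` now comes from `github.com/hashicorp/go-secure-stdlib/parseutil` rather than `github.com/hashicorp/vault/sdk/helper/parseutil`, mirroring the import change in client.go. A hedged sketch of what a consumer using the helper directly would change; `ParseDurationSecond` is assumed to exist under the new path just as it did under the old one:

```go
package main

import (
	"fmt"
	"log"

	// Old import path (before this bump):
	//   "github.com/hashicorp/vault/sdk/helper/parseutil"
	// New import path (vault/api 1.2.0 via go-secure-stdlib):
	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// Assumed helper: parse a human-friendly duration the way the Vault
	// client does for values such as VAULT_CLIENT_TIMEOUT.
	d, err := parseutil.ParseDurationSecond("90s")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d)
}
```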
vendor/github.com/hashicorp/vault/api/go.sum (generated, vendored): 225 changed lines
@ -1,18 +1,17 @@
|
||||
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
|
||||
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
@ -25,57 +24,44 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE=
|
||||
github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
||||
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
|
||||
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
@ -83,66 +69,81 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
|
||||
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
|
||||
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
|
||||
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
|
||||
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
|
||||
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
|
||||
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
@ -158,38 +159,31 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
|
||||
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
|
||||
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@ -204,116 +198,105 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -323,25 +306,21 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
|
||||
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
2
vendor/github.com/hashicorp/vault/api/lifetime_watcher.go
generated
vendored
@ -377,7 +377,7 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool,
|
||||
// assumptions given the total lease time; it also adds some jitter to not have
|
||||
// clients be in sync.
|
||||
func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) {
|
||||
if leaseDuration == 0 {
|
||||
if leaseDuration <= 0 {
|
||||
r.grace = 0
|
||||
return
|
||||
}
34
vendor/github.com/hashicorp/vault/api/logical.go
generated
vendored
@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
@ -81,7 +82,7 @@ func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret,
|
||||
case io.EOF:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
return nil, parseErr
|
||||
}
|
||||
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
|
||||
return secret, nil
|
||||
@ -115,7 +116,7 @@ func (c *Logical) List(path string) (*Secret, error) {
|
||||
case io.EOF:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
return nil, parseErr
|
||||
}
|
||||
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
|
||||
return secret, nil
|
||||
@ -130,24 +131,37 @@ func (c *Logical) List(path string) (*Secret, error) {
|
||||
}
|
||||
|
||||
func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) {
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
|
||||
r := c.c.NewRequest("PUT", "/v1/"+path)
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.write(path, r)
|
||||
return c.write(ctx, path, r)
|
||||
}
|
||||
|
||||
func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) {
|
||||
r := c.c.NewRequest("PATCH", "/v1/"+path)
|
||||
r.Headers = http.Header{
|
||||
"Content-Type": []string{"application/merge-patch+json"},
|
||||
}
|
||||
if err := r.SetJSONBody(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.write(ctx, path, r)
|
||||
}
|
||||
|
||||
func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) {
|
||||
r := c.c.NewRequest("PUT", "/v1/"+path)
|
||||
r.BodyBytes = data
|
||||
|
||||
return c.write(path, r)
|
||||
return c.write(context.Background(), path, r)
|
||||
}
|
||||
|
||||
func (c *Logical) write(path string, request *Request) (*Secret, error) {
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) {
|
||||
resp, err := c.c.RawRequestWithContext(ctx, request)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
@ -159,7 +173,7 @@ func (c *Logical) write(path string, request *Request) (*Secret, error) {
|
||||
case io.EOF:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
return nil, parseErr
|
||||
}
|
||||
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
|
||||
return secret, err
|
||||
@ -206,7 +220,7 @@ func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret
|
||||
case io.EOF:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
return nil, parseErr
|
||||
}
|
||||
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
|
||||
return secret, err
|
||||
@ -259,7 +273,7 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
|
||||
case io.EOF:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, err
|
||||
return nil, parseErr
|
||||
}
|
||||
if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
|
||||
return secret, nil
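The logical.go hunks above thread a caller-supplied context through write and add JSONMergePatch, which sends a PATCH body with the application/merge-patch+json content type. Below is a minimal sketch of how a caller might exercise the new method; the KV v2 mount path, the field names, and the need for a patch-capable Vault server are assumptions of the example, not guarantees of this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Write still creates its own context internally (see the hunk above).
	if _, err := client.Logical().Write("secret/data/app", map[string]interface{}{
		"data": map[string]interface{}{"user": "alice", "ttl": "1h"},
	}); err != nil {
		log.Fatal(err)
	}

	// JSONMergePatch takes an explicit context and sends only the fields to change.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	secret, err := client.Logical().JSONMergePatch(ctx, "secret/data/app", map[string]interface{}{
		"data": map[string]interface{}{"ttl": "2h"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("patched: %v\n", secret != nil)
}
```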
19
vendor/github.com/hashicorp/vault/api/response.go
generated
vendored
@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
||||
)
|
||||
|
||||
@ -41,12 +42,14 @@ func (r *Response) Error() error {
|
||||
|
||||
r.Body.Close()
|
||||
r.Body = ioutil.NopCloser(bodyBuf)
|
||||
ns := r.Header.Get(consts.NamespaceHeaderName)
|
||||
|
||||
// Build up the error object
|
||||
respErr := &ResponseError{
|
||||
HTTPMethod: r.Request.Method,
|
||||
URL: r.Request.URL.String(),
|
||||
StatusCode: r.StatusCode,
|
||||
HTTPMethod: r.Request.Method,
|
||||
URL: r.Request.URL.String(),
|
||||
StatusCode: r.StatusCode,
|
||||
NamespacePath: ns,
|
||||
}
|
||||
|
||||
// Decode the error response if we can. Note that we wrap the bodyBuf
|
||||
@ -92,6 +95,10 @@ type ResponseError struct {
|
||||
|
||||
// Errors are the underlying errors returned by Vault.
|
||||
Errors []string
|
||||
|
||||
// Namespace path to be reported to the client if it is set to anything other
|
||||
// than root
|
||||
NamespacePath string
|
||||
}
|
||||
|
||||
// Error returns a human-readable error string for the response error.
|
||||
@ -101,9 +108,15 @@ func (r *ResponseError) Error() string {
|
||||
errString = "Raw Message"
|
||||
}
|
||||
|
||||
var ns string
|
||||
if r.NamespacePath != "" && r.NamespacePath != "root/" {
|
||||
ns = "Namespace: " + r.NamespacePath + "\n"
|
||||
}
|
||||
|
||||
var errBody bytes.Buffer
|
||||
errBody.WriteString(fmt.Sprintf(
|
||||
"Error making API request.\n\n"+
|
||||
ns+
|
||||
"URL: %s %s\n"+
|
||||
"Code: %d. %s:\n\n",
|
||||
r.HTTPMethod, r.URL, r.StatusCode, errString))
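The response.go hunk records the request namespace on ResponseError and prints it whenever it is anything other than root/. The sketch below shows one way a caller might surface that field after a failed request; the errors.As usage and the example path are assumptions of the example, not part of the diff.

```go
package main

import (
	"errors"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Logical().Read("secret/data/forbidden-path")
	var respErr *vault.ResponseError
	if errors.As(err, &respErr) {
		// NamespacePath is taken from the namespace header on the response
		// (see the hunk above) and is empty or "root/" outside namespaces.
		fmt.Println(respErr.StatusCode, respErr.NamespacePath, respErr.Errors)
	}
}
```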
2
vendor/github.com/hashicorp/vault/api/secret.go
generated
vendored
@ -7,8 +7,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/parseutil"
|
||||
)
|
||||
|
||||
// Secret is the structure returned for every secret within Vault.
63
vendor/github.com/hashicorp/vault/api/sys_raft.go
generated
vendored
@ -1,21 +1,25 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/helper/parseutil"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
var ErrIncompleteSnapshot = errors.New("incomplete snapshot, unable to read SHA256SUMS.sealed file")
|
||||
|
||||
// RaftJoinResponse represents the response of the raft join API
|
||||
type RaftJoinResponse struct {
|
||||
Joined bool `json:"joined"`
|
||||
@ -210,11 +214,60 @@ func (c *Sys) RaftSnapshot(snapWriter io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(snapWriter, resp.Body)
|
||||
// Make sure that the last file in the archive, SHA256SUMS.sealed, is present
|
||||
// and non-empty. This is to catch cases where the snapshot failed midstream,
|
||||
// e.g. due to a problem with the seal that prevented encryption of that file.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
var verified bool
|
||||
|
||||
rPipe, wPipe := io.Pipe()
|
||||
dup := io.TeeReader(resp.Body, wPipe)
|
||||
go func() {
|
||||
defer func() {
|
||||
io.Copy(ioutil.Discard, rPipe)
|
||||
rPipe.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
uncompressed, err := gzip.NewReader(rPipe)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t := tar.NewReader(uncompressed)
|
||||
var h *tar.Header
|
||||
for {
|
||||
h, err = t.Next()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if h.Name != "SHA256SUMS.sealed" {
|
||||
continue
|
||||
}
|
||||
var b []byte
|
||||
b, err = ioutil.ReadAll(t)
|
||||
if err != nil || len(b) == 0 {
|
||||
return
|
||||
}
|
||||
verified = true
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy bytes from dup to snapWriter. This will have a side effect that
|
||||
// everything read from dup will be written to wPipe.
|
||||
_, err = io.Copy(snapWriter, dup)
|
||||
wPipe.Close()
|
||||
if err != nil {
|
||||
rPipe.CloseWithError(err)
|
||||
return err
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
if !verified {
|
||||
return ErrIncompleteSnapshot
|
||||
}
|
||||
return nil
|
||||
}
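The RaftSnapshot hunk above streams the snapshot to the caller while a TeeReader feeds a goroutine that walks the gzipped tar and checks that SHA256SUMS.sealed is present and non-empty, returning ErrIncompleteSnapshot otherwise. Below is a minimal standalone sketch of the same tee-and-verify pattern; only the idea and the required file name come from the diff, the helper itself is illustrative.

```go
package snapshot

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"io/ioutil"
	"sync"
)

// copyAndVerify copies src to dst while, on a tee'd copy of the stream,
// confirming that the gzipped tar contains a non-empty entry named required.
func copyAndVerify(dst io.Writer, src io.Reader, required string) (bool, error) {
	rPipe, wPipe := io.Pipe()
	tee := io.TeeReader(src, wPipe)

	var wg sync.WaitGroup
	var found bool
	wg.Add(1)
	go func() {
		defer func() {
			io.Copy(ioutil.Discard, rPipe) // drain so the writer side never blocks
			rPipe.Close()
			wg.Done()
		}()
		gz, err := gzip.NewReader(rPipe)
		if err != nil {
			return
		}
		tr := tar.NewReader(gz)
		for {
			hdr, err := tr.Next()
			if err != nil {
				return // io.EOF or a real error: either way nothing was verified
			}
			if hdr.Name != required {
				continue
			}
			b, err := ioutil.ReadAll(tr)
			found = err == nil && len(b) > 0
			return
		}
	}()

	_, err := io.Copy(dst, tee)
	wPipe.Close()
	if err != nil {
		rPipe.CloseWithError(err)
		return false, err
	}
	wg.Wait()
	return found, nil
}
```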
922
vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go
generated
vendored
Normal file
@ -0,0 +1,922 @@
|
||||
package certutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/vault/sdk/helper/errutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"golang.org/x/crypto/cryptobyte"
|
||||
cbasn1 "golang.org/x/crypto/cryptobyte/asn1"
|
||||
)
|
||||
|
||||
// GetHexFormatted returns the byte buffer formatted in hex with
|
||||
// the specified separator between bytes.
|
||||
func GetHexFormatted(buf []byte, sep string) string {
|
||||
var ret bytes.Buffer
|
||||
for _, cur := range buf {
|
||||
if ret.Len() > 0 {
|
||||
fmt.Fprintf(&ret, sep)
|
||||
}
|
||||
fmt.Fprintf(&ret, "%02x", cur)
|
||||
}
|
||||
return ret.String()
|
||||
}
|
||||
|
||||
// ParseHexFormatted returns the raw bytes from a formatted hex string
|
||||
func ParseHexFormatted(in, sep string) []byte {
|
||||
var ret bytes.Buffer
|
||||
var err error
|
||||
var inBits int64
|
||||
inBytes := strings.Split(in, sep)
|
||||
for _, inByte := range inBytes {
|
||||
if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil {
|
||||
return nil
|
||||
}
|
||||
ret.WriteByte(byte(inBits))
|
||||
}
|
||||
return ret.Bytes()
|
||||
}
|
||||
|
||||
// GetSubjKeyID returns the subject key ID, e.g. the SHA1 sum
|
||||
// of the marshaled public key
|
||||
func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) {
|
||||
if privateKey == nil {
|
||||
return nil, errutil.InternalError{Err: "passed-in private key is nil"}
|
||||
}
|
||||
|
||||
marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public())
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
|
||||
}
|
||||
|
||||
subjKeyID := sha1.Sum(marshaledKey)
|
||||
|
||||
return subjKeyID[:], nil
|
||||
}
|
||||
|
||||
// ParsePKIMap takes a map (for instance, the Secret.Data
|
||||
// returned from the PKI backend) and returns a ParsedCertBundle.
|
||||
func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) {
|
||||
result := &CertBundle{}
|
||||
err := mapstructure.Decode(data, result)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: err.Error()}
|
||||
}
|
||||
|
||||
return result.ToParsedCertBundle()
|
||||
}
|
||||
|
||||
// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle.
|
||||
//
|
||||
// The input can be the full output of an issue call from the PKI backend,
// just its data member, or JSON that did not come from the PKI backend at all.
|
||||
func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) {
|
||||
result := &CertBundle{}
|
||||
err := jsonutil.DecodeJSON(input, &result)
|
||||
|
||||
if err == nil {
|
||||
return result.ToParsedCertBundle()
|
||||
}
|
||||
|
||||
var secret Secret
|
||||
err = jsonutil.DecodeJSON(input, &secret)
|
||||
|
||||
if err == nil {
|
||||
return ParsePKIMap(secret.Data)
|
||||
}
|
||||
|
||||
return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"}
|
||||
}
|
||||
|
||||
// ParsePEMBundle takes a string of concatenated PEM-format certificate
|
||||
// and private key values and decodes/parses them, checking validity along
|
||||
// the way. The first certificate must be the subject certificate and issuing
|
||||
// certificates may follow. There must be at most one private key.
|
||||
func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) {
|
||||
if len(pemBundle) == 0 {
|
||||
return nil, errutil.UserError{Err: "empty pem bundle"}
|
||||
}
|
||||
|
||||
pemBytes := []byte(pemBundle)
|
||||
var pemBlock *pem.Block
|
||||
parsedBundle := &ParsedCertBundle{}
|
||||
var certPath []*CertBlock
|
||||
|
||||
for len(pemBytes) > 0 {
|
||||
pemBlock, pemBytes = pem.Decode(pemBytes)
|
||||
if pemBlock == nil {
|
||||
return nil, errutil.UserError{Err: "no data found in PEM block"}
|
||||
}
|
||||
|
||||
if signer, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil {
|
||||
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
|
||||
return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"}
|
||||
}
|
||||
parsedBundle.PrivateKeyFormat = ECBlock
|
||||
parsedBundle.PrivateKeyType = ECPrivateKey
|
||||
parsedBundle.PrivateKeyBytes = pemBlock.Bytes
|
||||
parsedBundle.PrivateKey = signer
|
||||
|
||||
} else if signer, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil {
|
||||
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
|
||||
return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"}
|
||||
}
|
||||
parsedBundle.PrivateKeyType = RSAPrivateKey
|
||||
parsedBundle.PrivateKeyFormat = PKCS1Block
|
||||
parsedBundle.PrivateKeyBytes = pemBlock.Bytes
|
||||
parsedBundle.PrivateKey = signer
|
||||
} else if signer, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil {
|
||||
parsedBundle.PrivateKeyFormat = PKCS8Block
|
||||
|
||||
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
|
||||
return nil, errutil.UserError{Err: "More than one private key given; provide only one private key in the bundle"}
|
||||
}
|
||||
switch signer := signer.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
parsedBundle.PrivateKey = signer
|
||||
parsedBundle.PrivateKeyType = RSAPrivateKey
|
||||
parsedBundle.PrivateKeyBytes = pemBlock.Bytes
|
||||
case *ecdsa.PrivateKey:
|
||||
parsedBundle.PrivateKey = signer
|
||||
parsedBundle.PrivateKeyType = ECPrivateKey
|
||||
parsedBundle.PrivateKeyBytes = pemBlock.Bytes
|
||||
}
|
||||
} else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil {
|
||||
certPath = append(certPath, &CertBlock{
|
||||
Certificate: certificates[0],
|
||||
Bytes: pemBlock.Bytes,
|
||||
})
|
||||
} else if x509.IsEncryptedPEMBlock(pemBlock) {
|
||||
return nil, errutil.UserError{Err: "Encrypted private key given; provide only decrypted private key in the bundle"}
|
||||
}
|
||||
}
|
||||
|
||||
for i, certBlock := range certPath {
|
||||
if i == 0 {
|
||||
parsedBundle.Certificate = certBlock.Certificate
|
||||
parsedBundle.CertificateBytes = certBlock.Bytes
|
||||
} else {
|
||||
parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock)
|
||||
}
|
||||
}
|
||||
|
||||
if err := parsedBundle.Verify(); err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)}
|
||||
}
|
||||
|
||||
return parsedBundle, nil
|
||||
}
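ParsePEMBundle above accepts a concatenated PEM string (subject certificate first, optional issuing certificates, at most one unencrypted private key) and returns a verified ParsedCertBundle. A short usage sketch follows; the input file name and the error handling are assumptions of the example.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	// Hypothetical input: leaf cert, CA chain, and key concatenated in one file.
	pemBytes, err := ioutil.ReadFile("bundle.pem")
	if err != nil {
		log.Fatal(err)
	}

	bundle, err := certutil.ParsePEMBundle(string(pemBytes))
	if err != nil {
		log.Fatal(err) // errutil.UserError for malformed or inconsistent input
	}

	fmt.Println("subject:", bundle.Certificate.Subject.CommonName)
	fmt.Println("key type:", bundle.PrivateKeyType)
	fmt.Println("chain length:", len(bundle.CAChain))
}
```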
// GeneratePrivateKey generates a private key with the specified type and key bits.
|
||||
func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error {
|
||||
return generatePrivateKey(keyType, keyBits, container, nil)
|
||||
}
|
||||
|
||||
// GeneratePrivateKeyWithRandomSource generates a private key with the specified type and key bits.
|
||||
// GeneratePrivateKeyWithRandomSource uses randomness from the entropyReader to generate the private key.
|
||||
func GeneratePrivateKeyWithRandomSource(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error {
|
||||
return generatePrivateKey(keyType, keyBits, container, entropyReader)
|
||||
}
|
||||
|
||||
// generatePrivateKey generates a private key with the specified type and key bits.
|
||||
// generatePrivateKey uses randomness from the entropyReader to generate the private key.
|
||||
func generatePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error {
|
||||
var err error
|
||||
var privateKeyType PrivateKeyType
|
||||
var privateKeyBytes []byte
|
||||
var privateKey crypto.Signer
|
||||
|
||||
var randReader io.Reader = rand.Reader
|
||||
if entropyReader != nil {
|
||||
randReader = entropyReader
|
||||
}
|
||||
|
||||
switch keyType {
|
||||
case "rsa":
|
||||
privateKeyType = RSAPrivateKey
|
||||
privateKey, err = rsa.GenerateKey(randReader, keyBits)
|
||||
if err != nil {
|
||||
return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)}
|
||||
}
|
||||
privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey))
|
||||
case "ec":
|
||||
privateKeyType = ECPrivateKey
|
||||
var curve elliptic.Curve
|
||||
switch keyBits {
|
||||
case 224:
|
||||
curve = elliptic.P224()
|
||||
case 256:
|
||||
curve = elliptic.P256()
|
||||
case 384:
|
||||
curve = elliptic.P384()
|
||||
case 521:
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)}
|
||||
}
|
||||
privateKey, err = ecdsa.GenerateKey(curve, randReader)
|
||||
if err != nil {
|
||||
return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)}
|
||||
}
|
||||
privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey))
|
||||
if err != nil {
|
||||
return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)}
|
||||
}
|
||||
default:
|
||||
return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)}
|
||||
}
|
||||
|
||||
container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes)
|
||||
return nil
|
||||
}
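GeneratePrivateKey and its unexported helper fill a ParsedPrivateKeyContainer with a freshly generated RSA or EC key. The sketch below assumes, as in the upstream package, that *ParsedCertBundle implements ParsedPrivateKeyContainer through SetParsedPrivateKey; that method is not shown in this excerpt.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	bundle := &certutil.ParsedCertBundle{}

	// "ec" with 256 bits maps to elliptic.P256 in the switch above; "rsa" with
	// 2048/3072/4096 bits would use rsa.GenerateKey instead.
	if err := certutil.GeneratePrivateKey("ec", 256, bundle); err != nil {
		log.Fatal(err)
	}

	fmt.Println("generated key type:", bundle.PrivateKeyType) // "ec"
	fmt.Println("DER bytes:", len(bundle.PrivateKeyBytes))
}
```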
// GenerateSerialNumber generates a serial number suitable for a certificate
|
||||
func GenerateSerialNumber() (*big.Int, error) {
|
||||
return generateSerialNumber(rand.Reader)
|
||||
}
|
||||
|
||||
// GenerateSerialNumberWithRandomSource generates a serial number suitable
|
||||
// for a certificate with custom entropy.
|
||||
func GenerateSerialNumberWithRandomSource(randReader io.Reader) (*big.Int, error) {
|
||||
return generateSerialNumber(randReader)
|
||||
}
|
||||
|
||||
func generateSerialNumber(randReader io.Reader) (*big.Int, error) {
|
||||
serial, err := rand.Int(randReader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil))
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)}
|
||||
}
|
||||
return serial, nil
|
||||
}
|
||||
|
||||
// ComparePublicKeys compares two public keys and returns true if they match
|
||||
func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) {
|
||||
switch key1Iface.(type) {
|
||||
case *rsa.PublicKey:
|
||||
key1 := key1Iface.(*rsa.PublicKey)
|
||||
key2, ok := key2Iface.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface)
|
||||
}
|
||||
if key1.N.Cmp(key2.N) != 0 ||
|
||||
key1.E != key2.E {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
case *ecdsa.PublicKey:
|
||||
key1 := key1Iface.(*ecdsa.PublicKey)
|
||||
key2, ok := key2Iface.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface)
|
||||
}
|
||||
if key1.X.Cmp(key2.X) != 0 ||
|
||||
key1.Y.Cmp(key2.Y) != 0 {
|
||||
return false, nil
|
||||
}
|
||||
key1Params := key1.Params()
|
||||
key2Params := key2.Params()
|
||||
if key1Params.P.Cmp(key2Params.P) != 0 ||
|
||||
key1Params.N.Cmp(key2Params.N) != 0 ||
|
||||
key1Params.B.Cmp(key2Params.B) != 0 ||
|
||||
key1Params.Gx.Cmp(key2Params.Gx) != 0 ||
|
||||
key1Params.Gy.Cmp(key2Params.Gy) != 0 ||
|
||||
key1Params.BitSize != key2Params.BitSize {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
return false, fmt.Errorf("cannot compare key with type %T", key1Iface)
|
||||
}
|
||||
}
|
||||
|
||||
// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs
|
||||
func ParsePublicKeyPEM(data []byte) (interface{}, error) {
|
||||
block, data := pem.Decode(data)
|
||||
if block != nil {
|
||||
var rawKey interface{}
|
||||
var err error
|
||||
if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
rawKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok {
|
||||
return rsaPublicKey, nil
|
||||
}
|
||||
if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok {
|
||||
return ecPublicKey, nil
|
||||
}
|
||||
if edPublicKey, ok := rawKey.(ed25519.PublicKey); ok {
|
||||
return edPublicKey, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("data does not contain any valid public keys")
|
||||
}
|
||||
|
||||
// AddPolicyIdentifiers adds the certificate policies extension to the
// certificate template.
func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) {
|
||||
for _, oidstr := range data.Params.PolicyIdentifiers {
|
||||
oid, err := StringToOid(oidstr)
|
||||
if err == nil {
|
||||
certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddExtKeyUsageOids adds custom extended key usage OIDs to the certificate template.
|
||||
func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) {
|
||||
for _, oidstr := range data.Params.ExtKeyUsageOIDs {
|
||||
oid, err := StringToOid(oidstr)
|
||||
if err == nil {
|
||||
certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error {
|
||||
certTemplate := &x509.Certificate{
|
||||
DNSNames: in.DNSNames,
|
||||
IPAddresses: in.IPAddresses,
|
||||
EmailAddresses: in.EmailAddresses,
|
||||
URIs: in.URIs,
|
||||
}
|
||||
if err := HandleOtherSANs(certTemplate, sans); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(certTemplate.ExtraExtensions) > 0 {
|
||||
for _, v := range certTemplate.ExtraExtensions {
|
||||
in.ExtraExtensions = append(in.ExtraExtensions, v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error {
|
||||
// If other SANs is empty we return which causes normal Go stdlib parsing
|
||||
// of the other SAN types
|
||||
if len(sans) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var rawValues []asn1.RawValue
|
||||
|
||||
// We need to generate an IMPLICIT sequence for compatibility with OpenSSL
|
||||
// -- it's an open question what the default for RFC 5280 actually is, see
|
||||
// https://github.com/openssl/openssl/issues/5091 -- so we have to use
|
||||
// cryptobyte because using the asn1 package's marshaling always produces
|
||||
// an EXPLICIT sequence. Note that asn1 is way too magical according to
|
||||
// agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into
|
||||
// boringssl.
|
||||
for oid, vals := range sans {
|
||||
for _, val := range vals {
|
||||
var b cryptobyte.Builder
|
||||
oidStr, err := StringToOid(oid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.AddASN1ObjectIdentifier(oidStr)
|
||||
b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
|
||||
b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) {
|
||||
b.AddBytes([]byte(val))
|
||||
})
|
||||
})
|
||||
m, err := b.Bytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m})
|
||||
}
|
||||
}
|
||||
|
||||
// If other SANs is empty we return which causes normal Go stdlib parsing
|
||||
// of the other SAN types
|
||||
if len(rawValues) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append any existing SANs, sans marshalling
|
||||
rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...)
|
||||
|
||||
// Marshal and add to ExtraExtensions
|
||||
ext := pkix.Extension{
|
||||
// This is the defined OID for subjectAltName
|
||||
Id: asn1.ObjectIdentifier{2, 5, 29, 17},
|
||||
}
|
||||
var err error
|
||||
ext.Value, err = asn1.Marshal(rawValues)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
in.ExtraExtensions = append(in.ExtraExtensions, ext)
|
||||
|
||||
return nil
|
||||
}
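HandleOtherSANs rebuilds the whole subjectAltName extension with cryptobyte so that otherName entries get the IMPLICIT tagging OpenSSL expects, folding any existing DNS/email/IP/URI SANs into the same extension. A small sketch of calling it with a map of OID strings to UTF-8 values; the Microsoft UPN OID is only a common illustration, not something the diff prescribes.

```go
package main

import (
	"crypto/x509"
	"log"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	tmpl := &x509.Certificate{
		DNSNames: []string{"example.com"},
	}

	// Keys are dotted OID strings, values are UTF-8 encoded otherName values.
	sans := map[string][]string{
		"1.3.6.1.4.1.311.20.2.3": {"user@example.com"}, // commonly used for UPNs
	}

	// The marshaled extension also folds in the template's existing
	// DNS/email/IP/URI SANs (see marshalSANs above), so everything ends up in
	// a single subjectAltName extension on ExtraExtensions.
	if err := certutil.HandleOtherSANs(tmpl, sans); err != nil {
		log.Fatal(err)
	}
	log.Printf("extra extensions: %d", len(tmpl.ExtraExtensions))
}
```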
// Note: Taken from the Go source code since it's not public, and used in the
|
||||
// modified function below (which also uses these consts upstream)
|
||||
const (
|
||||
nameTypeEmail = 1
|
||||
nameTypeDNS = 2
|
||||
nameTypeURI = 6
|
||||
nameTypeIP = 7
|
||||
)
|
||||
|
||||
// Note: Adapted from the Go source code since the original is not exported,
// and changed here to return the raw values rather than marshal them.
// marshalSANs marshals a list of addresses into the contents of an X.509
// SubjectAlternativeName extension.
|
||||
func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue {
|
||||
var rawValues []asn1.RawValue
|
||||
for _, name := range dnsNames {
|
||||
rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)})
|
||||
}
|
||||
for _, email := range emailAddresses {
|
||||
rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)})
|
||||
}
|
||||
for _, rawIP := range ipAddresses {
|
||||
// If possible, we always want to encode IPv4 addresses in 4 bytes.
|
||||
ip := rawIP.To4()
|
||||
if ip == nil {
|
||||
ip = rawIP
|
||||
}
|
||||
rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip})
|
||||
}
|
||||
for _, uri := range uris {
|
||||
rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())})
|
||||
}
|
||||
return rawValues
|
||||
}
|
||||
|
||||
func StringToOid(in string) (asn1.ObjectIdentifier, error) {
|
||||
split := strings.Split(in, ".")
|
||||
ret := make(asn1.ObjectIdentifier, 0, len(split))
|
||||
for _, v := range split {
|
||||
i, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret = append(ret, i)
|
||||
}
|
||||
return asn1.ObjectIdentifier(ret), nil
|
||||
}
|
||||
|
||||
func ValidateKeyTypeLength(keyType string, keyBits int) error {
|
||||
switch keyType {
|
||||
case "rsa":
|
||||
switch keyBits {
|
||||
case 2048:
|
||||
case 3072:
|
||||
case 4096:
|
||||
case 8192:
|
||||
default:
|
||||
return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits)
|
||||
}
|
||||
case "ec":
|
||||
switch keyBits {
|
||||
case 224:
|
||||
case 256:
|
||||
case 384:
|
||||
case 521:
|
||||
default:
|
||||
return fmt.Errorf("unsupported bit length for EC key: %d", keyBits)
|
||||
}
|
||||
case "any":
|
||||
default:
|
||||
return fmt.Errorf("unknown key type %s", keyType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
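ValidateKeyTypeLength and StringToOid above are small guards used before key generation and extension building. The calls below are an assumed usage pattern, not taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	// Accepts "rsa" with 2048/3072/4096/8192 bits, "ec" with 224/256/384/521
	// bits, or the wildcard key type "any".
	if err := certutil.ValidateKeyTypeLength("ec", 256); err != nil {
		log.Fatal(err)
	}

	// Dotted-decimal strings become asn1.ObjectIdentifier values.
	oid, err := certutil.StringToOid("2.5.29.17") // id-ce-subjectAltName
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(oid)
}
```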
// CreateCertificate uses CreationBundle and the default rand.Reader to
|
||||
// generate a cert/keypair.
|
||||
func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) {
|
||||
return createCertificate(data, rand.Reader)
|
||||
}
|
||||
|
||||
// CreateCertificateWithRandomSource uses CreationBundle and a custom
|
||||
// io.Reader for randomness to generate a cert/keypair.
|
||||
func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
|
||||
return createCertificate(data, randReader)
|
||||
}
|
||||
|
||||
func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
|
||||
var err error
|
||||
result := &ParsedCertBundle{}
|
||||
|
||||
serialNumber, err := GenerateSerialNumber()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := generatePrivateKey(data.Params.KeyType,
|
||||
data.Params.KeyBits,
|
||||
result, randReader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subjKeyID, err := GetSubjKeyID(result.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)}
|
||||
}
|
||||
|
||||
certTemplate := &x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
NotBefore: time.Now().Add(-30 * time.Second),
|
||||
NotAfter: data.Params.NotAfter,
|
||||
IsCA: false,
|
||||
SubjectKeyId: subjKeyID,
|
||||
Subject: data.Params.Subject,
|
||||
DNSNames: data.Params.DNSNames,
|
||||
EmailAddresses: data.Params.EmailAddresses,
|
||||
IPAddresses: data.Params.IPAddresses,
|
||||
URIs: data.Params.URIs,
|
||||
}
|
||||
if data.Params.NotBeforeDuration > 0 {
|
||||
certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration)
|
||||
}
|
||||
|
||||
if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil {
|
||||
return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()}
|
||||
}
|
||||
|
||||
// Add this before calling addKeyUsages
|
||||
if data.SigningBundle == nil {
|
||||
certTemplate.IsCA = true
|
||||
} else if data.Params.BasicConstraintsValidForNonCA {
|
||||
certTemplate.BasicConstraintsValid = true
|
||||
certTemplate.IsCA = false
|
||||
}
|
||||
|
||||
// This will only be filled in from the generation paths
|
||||
if len(data.Params.PermittedDNSDomains) > 0 {
|
||||
certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains
|
||||
certTemplate.PermittedDNSDomainsCritical = true
|
||||
}
|
||||
|
||||
AddPolicyIdentifiers(data, certTemplate)
|
||||
|
||||
AddKeyUsages(data, certTemplate)
|
||||
|
||||
AddExtKeyUsageOids(data, certTemplate)
|
||||
|
||||
certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates
|
||||
certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints
|
||||
certTemplate.OCSPServer = data.Params.URLs.OCSPServers
|
||||
|
||||
var certBytes []byte
|
||||
if data.SigningBundle != nil {
|
||||
switch data.SigningBundle.PrivateKeyType {
|
||||
case RSAPrivateKey:
|
||||
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case ECPrivateKey:
|
||||
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
}
|
||||
|
||||
caCert := data.SigningBundle.Certificate
|
||||
certTemplate.AuthorityKeyId = caCert.SubjectKeyId
|
||||
|
||||
certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey)
|
||||
} else {
|
||||
// Creating a self-signed root
|
||||
if data.Params.MaxPathLength == 0 {
|
||||
certTemplate.MaxPathLen = 0
|
||||
certTemplate.MaxPathLenZero = true
|
||||
} else {
|
||||
certTemplate.MaxPathLen = data.Params.MaxPathLength
|
||||
}
|
||||
|
||||
switch data.Params.KeyType {
|
||||
case "rsa":
|
||||
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case "ec":
|
||||
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
}
|
||||
|
||||
certTemplate.AuthorityKeyId = subjKeyID
|
||||
certTemplate.BasicConstraintsValid = true
|
||||
certBytes, err = x509.CreateCertificate(randReader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
|
||||
}
|
||||
|
||||
result.CertificateBytes = certBytes
|
||||
result.Certificate, err = x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
|
||||
}
|
||||
|
||||
if data.SigningBundle != nil {
|
||||
if len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 &&
|
||||
!bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) {
|
||||
|
||||
result.CAChain = []*CertBlock{
|
||||
{
|
||||
Certificate: data.SigningBundle.Certificate,
|
||||
Bytes: data.SigningBundle.CertificateBytes,
|
||||
},
|
||||
}
|
||||
result.CAChain = append(result.CAChain, data.SigningBundle.CAChain...)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var oidExtensionBasicConstraints = []int{2, 5, 29, 19}
|
||||
|
||||
// CreateCSR creates a CSR with the default rand.Reader to
|
||||
// generate a cert/keypair. This is currently only meant
|
||||
// for use when generating an intermediate certificate.
|
||||
func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) {
|
||||
return createCSR(data, addBasicConstraints, rand.Reader)
|
||||
}
|
||||
|
||||
// CreateCSRWithRandomSource creates a CSR with a custom io.Reader
|
||||
// for randomness to generate a cert/keypair.
|
||||
func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) {
|
||||
return createCSR(data, addBasicConstraints, randReader)
|
||||
}
|
||||
|
||||
func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) {
|
||||
var err error
|
||||
result := &ParsedCSRBundle{}
|
||||
|
||||
if err := generatePrivateKey(data.Params.KeyType,
|
||||
data.Params.KeyBits,
|
||||
result, randReader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Like many root CAs, other information is ignored
|
||||
csrTemplate := &x509.CertificateRequest{
|
||||
Subject: data.Params.Subject,
|
||||
DNSNames: data.Params.DNSNames,
|
||||
EmailAddresses: data.Params.EmailAddresses,
|
||||
IPAddresses: data.Params.IPAddresses,
|
||||
URIs: data.Params.URIs,
|
||||
}
|
||||
|
||||
if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil {
|
||||
return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()}
|
||||
}
|
||||
|
||||
if addBasicConstraints {
|
||||
type basicConstraints struct {
|
||||
IsCA bool `asn1:"optional"`
|
||||
MaxPathLen int `asn1:"optional,default:-1"`
|
||||
}
|
||||
val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1})
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()}
|
||||
}
|
||||
ext := pkix.Extension{
|
||||
Id: oidExtensionBasicConstraints,
|
||||
Value: val,
|
||||
Critical: true,
|
||||
}
|
||||
csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext)
|
||||
}
|
||||
|
||||
switch data.Params.KeyType {
|
||||
case "rsa":
|
||||
csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case "ec":
|
||||
csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
}
|
||||
|
||||
csr, err := x509.CreateCertificateRequest(randReader, csrTemplate, result.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
|
||||
}
|
||||
|
||||
result.CSRBytes = csr
|
||||
result.CSR, err = x509.ParseCertificateRequest(csr)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SignCertificate performs the heavy lifting
|
||||
// of generating a certificate from a CSR.
|
||||
// Returns a ParsedCertBundle sans private keys.
|
||||
func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) {
|
||||
return signCertificate(data, rand.Reader)
|
||||
}
|
||||
|
||||
// SignCertificateWithRandomSource generates a certificate
|
||||
// from a CSR, using custom randomness from the randReader.
|
||||
// Returns a ParsedCertBundle sans private keys.
|
||||
func SignCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
|
||||
return signCertificate(data, randReader)
|
||||
}
|
||||
|
||||
func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
|
||||
switch {
|
||||
case data == nil:
|
||||
return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"}
|
||||
case data.Params == nil:
|
||||
return nil, errutil.UserError{Err: "nil parameters given to signCertificate"}
|
||||
case data.SigningBundle == nil:
|
||||
return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"}
|
||||
case data.CSR == nil:
|
||||
return nil, errutil.UserError{Err: "nil csr given to signCertificate"}
|
||||
}
|
||||
|
||||
err := data.CSR.CheckSignature()
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: "request signature invalid"}
|
||||
}
|
||||
|
||||
result := &ParsedCertBundle{}
|
||||
|
||||
serialNumber, err := GenerateSerialNumber()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
marshaledKey, err := x509.MarshalPKIXPublicKey(data.CSR.PublicKey)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
|
||||
}
|
||||
subjKeyID := sha1.Sum(marshaledKey)
|
||||
|
||||
caCert := data.SigningBundle.Certificate
|
||||
|
||||
certTemplate := &x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: data.Params.Subject,
|
||||
NotBefore: time.Now().Add(-30 * time.Second),
|
||||
NotAfter: data.Params.NotAfter,
|
||||
SubjectKeyId: subjKeyID[:],
|
||||
AuthorityKeyId: caCert.SubjectKeyId,
|
||||
}
|
||||
if data.Params.NotBeforeDuration > 0 {
|
||||
certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration)
|
||||
}
|
||||
|
||||
switch data.SigningBundle.PrivateKeyType {
|
||||
case RSAPrivateKey:
|
||||
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case ECPrivateKey:
|
||||
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
}
|
||||
|
||||
if data.Params.UseCSRValues {
|
||||
certTemplate.Subject = data.CSR.Subject
|
||||
certTemplate.Subject.ExtraNames = certTemplate.Subject.Names
|
||||
|
||||
certTemplate.DNSNames = data.CSR.DNSNames
|
||||
certTemplate.EmailAddresses = data.CSR.EmailAddresses
|
||||
certTemplate.IPAddresses = data.CSR.IPAddresses
|
||||
certTemplate.URIs = data.CSR.URIs
|
||||
|
||||
for _, name := range data.CSR.Extensions {
|
||||
if !name.Id.Equal(oidExtensionBasicConstraints) {
|
||||
certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
certTemplate.DNSNames = data.Params.DNSNames
|
||||
certTemplate.EmailAddresses = data.Params.EmailAddresses
|
||||
certTemplate.IPAddresses = data.Params.IPAddresses
|
||||
certTemplate.URIs = data.Params.URIs
|
||||
}
|
||||
|
||||
if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil {
|
||||
return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()}
|
||||
}
|
||||
|
||||
AddPolicyIdentifiers(data, certTemplate)
|
||||
|
||||
AddKeyUsages(data, certTemplate)
|
||||
|
||||
AddExtKeyUsageOids(data, certTemplate)
|
||||
|
||||
var certBytes []byte
|
||||
|
||||
certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates
|
||||
certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints
|
||||
certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers
|
||||
|
||||
if data.Params.IsCA {
|
||||
certTemplate.BasicConstraintsValid = true
|
||||
certTemplate.IsCA = true
|
||||
|
||||
if data.SigningBundle.Certificate.MaxPathLen == 0 &&
|
||||
data.SigningBundle.Certificate.MaxPathLenZero {
|
||||
return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"}
|
||||
}
|
||||
|
||||
certTemplate.MaxPathLen = data.Params.MaxPathLength
|
||||
if certTemplate.MaxPathLen == 0 {
|
||||
certTemplate.MaxPathLenZero = true
|
||||
}
|
||||
} else if data.Params.BasicConstraintsValidForNonCA {
|
||||
certTemplate.BasicConstraintsValid = true
|
||||
certTemplate.IsCA = false
|
||||
}
|
||||
|
||||
if len(data.Params.PermittedDNSDomains) > 0 {
|
||||
certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains
|
||||
certTemplate.PermittedDNSDomainsCritical = true
|
||||
}
|
||||
|
||||
certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey)
|
||||
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
|
||||
}
|
||||
|
||||
result.CertificateBytes = certBytes
|
||||
result.Certificate, err = x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
|
||||
}
|
||||
|
||||
result.CAChain = data.SigningBundle.GetCAChain()
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewCertPool(reader io.Reader) (*x509.CertPool, error) {
|
||||
pemBlock, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certs, err := parseCertsPEM(pemBlock)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading certs: %s", err)
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, cert := range certs {
|
||||
pool.AddCert(cert)
|
||||
}
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
|
||||
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
|
||||
func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
|
||||
ok := false
|
||||
certs := []*x509.Certificate{}
|
||||
for len(pemCerts) > 0 {
|
||||
var block *pem.Block
|
||||
block, pemCerts = pem.Decode(pemCerts)
|
||||
if block == nil {
|
||||
break
|
||||
}
|
||||
// Only use PEM "CERTIFICATE" blocks without extra headers
|
||||
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return certs, err
|
||||
}
|
||||
|
||||
certs = append(certs, cert)
|
||||
ok = true
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
|
||||
}
|
||||
return certs, nil
|
||||
}
766
vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
generated
vendored
Normal file
@ -0,0 +1,766 @@
|
||||
// Package certutil contains helper functions that are mostly used
|
||||
// with the PKI backend but can be generally useful. Functionality
|
||||
// includes helpers for converting a certificate/private key bundle
|
||||
// between DER and PEM, printing certificate serial numbers, and more.
|
||||
//
|
||||
// Functionality specific to the PKI backend includes some types
|
||||
// and helper methods to make requesting certificates from the
|
||||
// backend easy.
|
||||
package certutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/vault/sdk/helper/errutil"
|
||||
)
|
||||
|
||||
const (
|
||||
PrivateKeyTypeP521 = "p521"
|
||||
)
|
||||
|
||||
// This can be one of a few key types so the different params may or may not be filled
|
||||
type ClusterKeyParams struct {
|
||||
Type string `json:"type" structs:"type" mapstructure:"type"`
|
||||
X *big.Int `json:"x" structs:"x" mapstructure:"x"`
|
||||
Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
|
||||
D *big.Int `json:"d" structs:"d" mapstructure:"d"`
|
||||
}
|
||||
|
||||
// Secret is used to attempt to unmarshal a Vault secret
|
||||
// JSON response, as a convenience
|
||||
type Secret struct {
|
||||
Data map[string]interface{} `json:"data"`
|
||||
}
|
||||
|
||||
// PrivateKeyType holds a string representation of the type of private key (ec
|
||||
// or rsa) referenced in CertBundle and ParsedCertBundle. This uses colloquial
|
||||
// names rather than official names, to eliminate confusion
|
||||
type PrivateKeyType string
|
||||
|
||||
// Well-known PrivateKeyTypes
|
||||
const (
|
||||
UnknownPrivateKey PrivateKeyType = ""
|
||||
RSAPrivateKey PrivateKeyType = "rsa"
|
||||
ECPrivateKey PrivateKeyType = "ec"
|
||||
)
|
||||
|
||||
// TLSUsage controls whether the intended usage of a *tls.Config
|
||||
// returned from ParsedCertBundle.getTLSConfig is for server use,
|
||||
// client use, or both, which affects which values are set
|
||||
type TLSUsage int
|
||||
|
||||
// Well-known TLSUsage types
|
||||
const (
|
||||
TLSUnknown TLSUsage = 0
|
||||
TLSServer TLSUsage = 1 << iota
|
||||
TLSClient
|
||||
)
|
||||
|
||||
// BlockType indicates the serialization format of the key
|
||||
type BlockType string
|
||||
|
||||
// Well-known formats
|
||||
const (
|
||||
PKCS1Block BlockType = "RSA PRIVATE KEY"
|
||||
PKCS8Block BlockType = "PRIVATE KEY"
|
||||
ECBlock BlockType = "EC PRIVATE KEY"
|
||||
)
|
||||
|
||||
// ParsedPrivateKeyContainer allows common key setting for certs and CSRs
|
||||
type ParsedPrivateKeyContainer interface {
|
||||
SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte)
|
||||
}
|
||||
|
||||
// CertBlock contains the DER-encoded certificate and the PEM
|
||||
// block's byte array
|
||||
type CertBlock struct {
|
||||
Certificate *x509.Certificate
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
// CertBundle contains a key type, a PEM-encoded private key,
|
||||
// a PEM-encoded certificate, and a string-encoded serial number,
|
||||
// returned from a successful Issue request
|
||||
type CertBundle struct {
|
||||
PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"`
|
||||
Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
|
||||
IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"`
|
||||
CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"`
|
||||
PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"`
|
||||
SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"`
|
||||
}
|
||||
|
||||
// ParsedCertBundle contains a key type, a DER-encoded private key,
|
||||
// and a DER-encoded certificate
|
||||
type ParsedCertBundle struct {
|
||||
PrivateKeyType PrivateKeyType
|
||||
PrivateKeyFormat BlockType
|
||||
PrivateKeyBytes []byte
|
||||
PrivateKey crypto.Signer
|
||||
CertificateBytes []byte
|
||||
Certificate *x509.Certificate
|
||||
CAChain []*CertBlock
|
||||
}
|
||||
|
||||
// CSRBundle contains a key type, a PEM-encoded private key,
|
||||
// and a PEM-encoded CSR
|
||||
type CSRBundle struct {
|
||||
PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"`
|
||||
CSR string `json:"csr" structs:"csr" mapstructure:"csr"`
|
||||
PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"`
|
||||
}
|
||||
|
||||
// ParsedCSRBundle contains a key type, a DER-encoded private key,
|
||||
// and a DER-encoded certificate request
|
||||
type ParsedCSRBundle struct {
|
||||
PrivateKeyType PrivateKeyType
|
||||
PrivateKeyBytes []byte
|
||||
PrivateKey crypto.Signer
|
||||
CSRBytes []byte
|
||||
CSR *x509.CertificateRequest
|
||||
}
|
||||
|
||||
// ToPEMBundle converts a string-based certificate bundle
|
||||
// to a PEM-based string certificate bundle in trust path
|
||||
// order, leaf certificate first
|
||||
func (c *CertBundle) ToPEMBundle() string {
|
||||
var result []string
|
||||
|
||||
if len(c.PrivateKey) > 0 {
|
||||
result = append(result, c.PrivateKey)
|
||||
}
|
||||
if len(c.Certificate) > 0 {
|
||||
result = append(result, c.Certificate)
|
||||
}
|
||||
if len(c.CAChain) > 0 {
|
||||
result = append(result, c.CAChain...)
|
||||
}
|
||||
|
||||
return strings.Join(result, "\n")
|
||||
}
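CertBundle and ParsedCertBundle above are the PEM/string and DER/parsed views of the same material, with ToParsedCertBundle, ToCertBundle, and ToPEMBundle converting between them. The sketch below round-trips a bundle; the input file names are hypothetical and only stand in for PEM data obtained elsewhere.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	// Hypothetical input files; any PEM-encoded leaf, key, and issuing CA will do.
	leafPEM, _ := ioutil.ReadFile("leaf.pem")
	keyPEM, _ := ioutil.ReadFile("leaf-key.pem")
	caPEM, _ := ioutil.ReadFile("ca.pem")

	bundle := &certutil.CertBundle{
		Certificate: string(leafPEM),
		PrivateKey:  string(keyPEM),
		IssuingCA:   string(caPEM), // legacy single-CA field; CAChain is the newer form
	}

	// PEM/string view -> parsed DER view; this also backfills SerialNumber.
	parsed, err := bundle.ToParsedCertBundle()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key type:", parsed.PrivateKeyType)

	// Parsed view -> PEM view again, or one trust-path-ordered PEM string.
	back, err := parsed.ToCertBundle()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(back.ToPEMBundle()), "bytes of PEM")
}
```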
// ToParsedCertBundle converts a string-based certificate bundle
|
||||
// to a byte-based raw certificate bundle
|
||||
func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
|
||||
result := &ParsedCertBundle{}
|
||||
var err error
|
||||
var pemBlock *pem.Block
|
||||
|
||||
if len(c.PrivateKey) > 0 {
|
||||
pemBlock, _ = pem.Decode([]byte(c.PrivateKey))
|
||||
if pemBlock == nil {
|
||||
return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"}
|
||||
}
|
||||
|
||||
result.PrivateKeyBytes = pemBlock.Bytes
|
||||
result.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type))
|
||||
|
||||
switch result.PrivateKeyFormat {
|
||||
case ECBlock:
|
||||
result.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey
|
||||
case PKCS1Block:
|
||||
c.PrivateKeyType, result.PrivateKeyType = RSAPrivateKey, RSAPrivateKey
|
||||
case PKCS8Block:
|
||||
t, err := getPKCS8Type(pemBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)}
|
||||
}
|
||||
result.PrivateKeyType = t
|
||||
switch t {
|
||||
case ECPrivateKey:
|
||||
c.PrivateKeyType = ECPrivateKey
|
||||
case RSAPrivateKey:
|
||||
c.PrivateKeyType = RSAPrivateKey
|
||||
}
|
||||
default:
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)}
|
||||
}
|
||||
|
||||
result.PrivateKey, err = result.getSigner()
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.Certificate) > 0 {
|
||||
pemBlock, _ = pem.Decode([]byte(c.Certificate))
|
||||
if pemBlock == nil {
|
||||
return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
|
||||
}
|
||||
result.CertificateBytes = pemBlock.Bytes
|
||||
result.Certificate, err = x509.ParseCertificate(result.CertificateBytes)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)}
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case len(c.CAChain) > 0:
|
||||
for _, cert := range c.CAChain {
|
||||
pemBlock, _ := pem.Decode([]byte(cert))
|
||||
if pemBlock == nil {
|
||||
return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
|
||||
}
|
||||
|
||||
parsedCert, err := x509.ParseCertificate(pemBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)}
|
||||
}
|
||||
|
||||
certBlock := &CertBlock{
|
||||
Bytes: pemBlock.Bytes,
|
||||
Certificate: parsedCert,
|
||||
}
|
||||
result.CAChain = append(result.CAChain, certBlock)
|
||||
}
|
||||
|
||||
// For backwards compatibility
|
||||
case len(c.IssuingCA) > 0:
|
||||
pemBlock, _ = pem.Decode([]byte(c.IssuingCA))
|
||||
if pemBlock == nil {
|
||||
return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"}
|
||||
}
|
||||
|
||||
parsedCert, err := x509.ParseCertificate(pemBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)}
|
||||
}
|
||||
|
||||
certBlock := &CertBlock{
|
||||
Bytes: pemBlock.Bytes,
|
||||
Certificate: parsedCert,
|
||||
}
|
||||
result.CAChain = append(result.CAChain, certBlock)
|
||||
}
|
||||
|
||||
// Populate if it isn't there already
|
||||
if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 {
|
||||
c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ToCertBundle converts a byte-based raw DER certificate bundle
|
||||
// to a PEM-based string certificate bundle
|
||||
func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) {
|
||||
result := &CertBundle{}
|
||||
block := pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
}
|
||||
|
||||
if p.Certificate != nil {
|
||||
result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":"))
|
||||
}
|
||||
|
||||
if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 {
|
||||
block.Bytes = p.CertificateBytes
|
||||
result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
|
||||
}
|
||||
|
||||
for _, caCert := range p.CAChain {
|
||||
block.Bytes = caCert.Bytes
|
||||
certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block)))
|
||||
|
||||
result.CAChain = append(result.CAChain, certificate)
|
||||
}
|
||||
|
||||
if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 {
|
||||
block.Type = string(p.PrivateKeyFormat)
|
||||
block.Bytes = p.PrivateKeyBytes
|
||||
result.PrivateKeyType = p.PrivateKeyType
|
||||
|
||||
// Handle bundle not parsed by us
|
||||
if block.Type == "" {
|
||||
switch p.PrivateKeyType {
|
||||
case ECPrivateKey:
|
||||
block.Type = string(ECBlock)
|
||||
case RSAPrivateKey:
|
||||
block.Type = string(PKCS1Block)
|
||||
}
|
||||
}
|
||||
|
||||
result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
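// Illustrative usage sketch (not part of the vendored diff): round-tripping a
// string-based CertBundle through its parsed form using the helpers above.
// The PEM variables are placeholder assumptions; error handling is elided.
//
//	bundle := &certutil.CertBundle{
//		Certificate: leafPEM, // assumed PEM-encoded leaf certificate
//		PrivateKey:  keyPEM,  // assumed PEM-encoded private key
//		CAChain:     []string{caPEM},
//	}
//	parsed, err := bundle.ToParsedCertBundle() // DER bytes + parsed x509 objects
//	roundTripped, err := parsed.ToCertBundle() // back to PEM strings
//	pemChain := bundle.ToPEMBundle()           // leaf first, then CA chain, newline-joined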
// Verify checks if the parsed bundle is valid. It validates the public
// key of the certificate to the private key and checks the certificate trust
// chain for path issues.
func (p *ParsedCertBundle) Verify() error {
	// If private key exists, check if it matches the public key of cert
	if p.PrivateKey != nil && p.Certificate != nil {
		equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public())
		if err != nil {
			return errwrap.Wrapf("could not compare public and private keys: {{err}}", err)
		}
		if !equal {
			return fmt.Errorf("public key of certificate does not match private key")
		}
	}

	certPath := p.GetCertificatePath()
	if len(certPath) > 1 {
		for i, caCert := range certPath[1:] {
			if !caCert.Certificate.IsCA {
				return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1)
			}
			if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) {
				return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q) (%X/%X)",
					i+1,
					certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName,
					certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId)
			}
		}
	}

	return nil
}

// GetCertificatePath returns a slice of certificates making up a path, pulled
// from the parsed cert bundle
func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock {
	var certPath []*CertBlock

	certPath = append(certPath, &CertBlock{
		Certificate: p.Certificate,
		Bytes:       p.CertificateBytes,
	})

	if len(p.CAChain) > 0 {
		// Root CA puts itself in the chain
		if p.CAChain[0].Certificate.SerialNumber != p.Certificate.SerialNumber {
			certPath = append(certPath, p.CAChain...)
		}
	}

	return certPath
}

// GetSigner returns a crypto.Signer corresponding to the private key
// contained in this ParsedCertBundle. The Signer contains a Public() function
// for getting the corresponding public. The Signer can also be
// type-converted to private keys
func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) {
	var signer crypto.Signer
	var err error

	if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
		return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
	}

	switch p.PrivateKeyFormat {
	case ECBlock:
		signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
		}

	case PKCS1Block:
		signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
		}

	case PKCS8Block:
		if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil {
			switch k := k.(type) {
			case *rsa.PrivateKey, *ecdsa.PrivateKey:
				return k.(crypto.Signer), nil
			default:
				return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"}
			}
		}
		return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
	default:
		return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"}
	}
	return signer, nil
}

// SetParsedPrivateKey sets the private key parameters on the bundle
func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) {
	p.PrivateKey = privateKey
	p.PrivateKeyType = privateKeyType
	p.PrivateKeyBytes = privateKeyBytes
}

func getPKCS8Type(bs []byte) (PrivateKeyType, error) {
	k, err := x509.ParsePKCS8PrivateKey(bs)
	if err != nil {
		return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
	}

	switch k.(type) {
	case *ecdsa.PrivateKey:
		return ECPrivateKey, nil
	case *rsa.PrivateKey:
		return RSAPrivateKey, nil
	default:
		return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"}
	}
}

// ToParsedCSRBundle converts a string-based CSR bundle
// to a byte-based raw CSR bundle
func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) {
	result := &ParsedCSRBundle{}
	var err error
	var pemBlock *pem.Block

	if len(c.PrivateKey) > 0 {
		pemBlock, _ = pem.Decode([]byte(c.PrivateKey))
		if pemBlock == nil {
			return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"}
		}
		result.PrivateKeyBytes = pemBlock.Bytes

		switch BlockType(pemBlock.Type) {
		case ECBlock:
			result.PrivateKeyType = ECPrivateKey
		case PKCS1Block:
			result.PrivateKeyType = RSAPrivateKey
		default:
			// Try to figure it out and correct
			if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil {
				result.PrivateKeyType = ECPrivateKey
				c.PrivateKeyType = "ec"
			} else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil {
				result.PrivateKeyType = RSAPrivateKey
				c.PrivateKeyType = "rsa"
			} else {
				return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)}
			}
		}

		result.PrivateKey, err = result.getSigner()
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
		}
	}

	if len(c.CSR) > 0 {
		pemBlock, _ = pem.Decode([]byte(c.CSR))
		if pemBlock == nil {
			return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
		}
		result.CSRBytes = pemBlock.Bytes
		result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes)
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)}
		}
	}

	return result, nil
}

// ToCSRBundle converts a byte-based raw DER certificate bundle
// to a PEM-based string certificate bundle
func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) {
	result := &CSRBundle{}
	block := pem.Block{
		Type: "CERTIFICATE REQUEST",
	}

	if p.CSRBytes != nil && len(p.CSRBytes) > 0 {
		block.Bytes = p.CSRBytes
		result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
	}

	if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 {
		block.Bytes = p.PrivateKeyBytes
		switch p.PrivateKeyType {
		case RSAPrivateKey:
			result.PrivateKeyType = "rsa"
			block.Type = "RSA PRIVATE KEY"
		case ECPrivateKey:
			result.PrivateKeyType = "ec"
			block.Type = "EC PRIVATE KEY"
		default:
			return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"}
		}
		result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
	}

	return result, nil
}

// GetSigner returns a crypto.Signer corresponding to the private key
// contained in this ParsedCSRBundle. The Signer contains a Public() function
// for getting the corresponding public. The Signer can also be
// type-converted to private keys
func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) {
	var signer crypto.Signer
	var err error

	if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
		return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
	}

	switch p.PrivateKeyType {
	case ECPrivateKey:
		signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
		}

	case RSAPrivateKey:
		signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
		if err != nil {
			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
		}

	default:
		return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"}
	}
	return signer, nil
}

// SetParsedPrivateKey sets the private key parameters on the bundle
func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) {
	p.PrivateKey = privateKey
	p.PrivateKeyType = privateKeyType
	p.PrivateKeyBytes = privateKeyBytes
}

// getTLSConfig returns a TLS config generally suitable for client
// authentication. The returned TLS config can be modified slightly
// to be made suitable for a server requiring client authentication;
// specifically, you should set the value of ClientAuth in the returned
// config to match your needs.
func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) {
	tlsCert := tls.Certificate{
		Certificate: [][]byte{},
	}

	tlsConfig := &tls.Config{
		MinVersion: tls.VersionTLS12,
	}

	if p.Certificate != nil {
		tlsCert.Leaf = p.Certificate
	}

	if p.PrivateKey != nil {
		tlsCert.PrivateKey = p.PrivateKey
	}

	if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 {
		tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes)
	}

	if len(p.CAChain) > 0 {
		for _, cert := range p.CAChain {
			tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes)
		}

		// Technically we only need one cert, but this doesn't duplicate code
		certBundle, err := p.ToCertBundle()
		if err != nil {
			return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err)
		}

		caPool := x509.NewCertPool()
		ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0]))
		if !ok {
			return nil, fmt.Errorf("could not append CA certificate")
		}

		if usage&TLSServer > 0 {
			tlsConfig.ClientCAs = caPool
			tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven
		}
		if usage&TLSClient > 0 {
			tlsConfig.RootCAs = caPool
		}
	}

	if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 {
		tlsConfig.Certificates = []tls.Certificate{tlsCert}
		tlsConfig.BuildNameToCertificate()
	}

	return tlsConfig, nil
}

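// Illustrative usage sketch (not part of the vendored diff): building a TLS
// client configuration from a parsed bundle via GetTLSConfig above, then
// handing it to an HTTP transport. TLSClient is the usage flag consumed by
// GetTLSConfig; the variable names are placeholders.
//
//	tlsConfig, err := parsed.GetTLSConfig(certutil.TLSClient)
//	if err != nil {
//		return err
//	}
//	httpClient := &http.Client{
//		Transport: &http.Transport{TLSClientConfig: tlsConfig},
//	}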
// IssueData is a structure that is suitable for marshaling into a request;
// either via JSON, or into a map[string]interface{} via the structs package
type IssueData struct {
	TTL        string `json:"ttl" structs:"ttl" mapstructure:"ttl"`
	CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"`
	OU         string `json:"ou" structs:"ou" mapstructure:"ou"`
	AltNames   string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"`
	IPSANs     string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"`
	CSR        string `json:"csr" structs:"csr" mapstructure:"csr"`
	OtherSANs  string `json:"other_sans" structs:"other_sans" mapstructure:"other_sans"`
}

type URLEntries struct {
	IssuingCertificates   []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"`
	CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"`
	OCSPServers           []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"`
}

type CAInfoBundle struct {
	ParsedCertBundle
	URLs *URLEntries
}

func (b *CAInfoBundle) GetCAChain() []*CertBlock {
	chain := []*CertBlock{}

	// Include issuing CA in Chain, not including Root Authority
	if (len(b.Certificate.AuthorityKeyId) > 0 &&
		!bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) ||
		(len(b.Certificate.AuthorityKeyId) == 0 &&
			!bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) {

		chain = append(chain, &CertBlock{
			Certificate: b.Certificate,
			Bytes:       b.CertificateBytes,
		})
		if b.CAChain != nil && len(b.CAChain) > 0 {
			chain = append(chain, b.CAChain...)
		}
	}

	return chain
}

type CertExtKeyUsage int

const (
	AnyExtKeyUsage CertExtKeyUsage = 1 << iota
	ServerAuthExtKeyUsage
	ClientAuthExtKeyUsage
	CodeSigningExtKeyUsage
	EmailProtectionExtKeyUsage
	IpsecEndSystemExtKeyUsage
	IpsecTunnelExtKeyUsage
	IpsecUserExtKeyUsage
	TimeStampingExtKeyUsage
	OcspSigningExtKeyUsage
	MicrosoftServerGatedCryptoExtKeyUsage
	NetscapeServerGatedCryptoExtKeyUsage
	MicrosoftCommercialCodeSigningExtKeyUsage
	MicrosoftKernelCodeSigningExtKeyUsage
)

type CreationParameters struct {
	Subject                       pkix.Name
	DNSNames                      []string
	EmailAddresses                []string
	IPAddresses                   []net.IP
	URIs                          []*url.URL
	OtherSANs                     map[string][]string
	IsCA                          bool
	KeyType                       string
	KeyBits                       int
	NotAfter                      time.Time
	KeyUsage                      x509.KeyUsage
	ExtKeyUsage                   CertExtKeyUsage
	ExtKeyUsageOIDs               []string
	PolicyIdentifiers             []string
	BasicConstraintsValidForNonCA bool

	// Only used when signing a CA cert
	UseCSRValues        bool
	PermittedDNSDomains []string

	// URLs to encode into the certificate
	URLs *URLEntries

	// The maximum path length to encode
	MaxPathLength int

	// The duration the certificate will use NotBefore
	NotBeforeDuration time.Duration
}

type CreationBundle struct {
	Params        *CreationParameters
	SigningBundle *CAInfoBundle
	CSR           *x509.CertificateRequest
}

// addKeyUsages adds appropriate key usages to the template given the creation
// information
func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) {
	if data.Params.IsCA {
		certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign)
		return
	}

	certTemplate.KeyUsage = data.Params.KeyUsage

	if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny)
	}

	if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
	}

	if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
	}

	if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning)
	}

	if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection)
	}

	if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem)
	}

	if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel)
	}

	if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser)
	}

	if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping)
	}

	if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning)
	}

	if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto)
	}

	if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto)
	}

	if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning)
	}

	if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 {
		certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning)
	}
}
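// Illustrative sketch (not part of the vendored diff): CertExtKeyUsage values
// are bit flags, so multiple extended key usages can be OR-ed together before
// calling AddKeyUsages above. The parameter values are placeholders.
//
//	params := &certutil.CreationParameters{
//		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
//		ExtKeyUsage: certutil.ServerAuthExtKeyUsage | certutil.ClientAuthExtKeyUsage,
//	}
//	certutil.AddKeyUsages(&certutil.CreationBundle{Params: params}, certTemplate)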
11
vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
package cryptoutil

import "golang.org/x/crypto/blake2b"

func Blake2b256Hash(key string) []byte {
	hf, _ := blake2b.New256(nil)

	hf.Write([]byte(key))

	return hf.Sum(nil)
}
20
vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
package errutil

// UserError represents an error generated due to invalid user input
type UserError struct {
	Err string
}

func (e UserError) Error() string {
	return e.Err
}

// InternalError represents an error generated internally,
// presumably not due to invalid user input
type InternalError struct {
	Err string
}

func (e InternalError) Error() string {
	return e.Err
}
10
vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
package license

// Features is a bitmask of feature flags
type Features uint

const FeatureNone Features = 0

func (f Features) HasFeature(flag Features) bool {
	return false
}
59
vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
package locksutil

import (
	"sync"

	"github.com/hashicorp/vault/sdk/helper/cryptoutil"
)

const (
	LockCount = 256
)

type LockEntry struct {
	sync.RWMutex
}

// CreateLocks returns an array so that the locks can be iterated over in
// order.
//
// This is only threadsafe if a process is using a single lock, or iterating
// over the entire lock slice in order. Using a consistent order avoids
// deadlocks because you can never have the following:
//
// Lock A, Lock B
// Lock B, Lock A
//
// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A
//
func CreateLocks() []*LockEntry {
	ret := make([]*LockEntry, LockCount)
	for i := range ret {
		ret[i] = new(LockEntry)
	}
	return ret
}

func LockIndexForKey(key string) uint8 {
	return uint8(cryptoutil.Blake2b256Hash(key)[0])
}

func LockForKey(locks []*LockEntry, key string) *LockEntry {
	return locks[LockIndexForKey(key)]
}

func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry {
	lockIndexes := make(map[uint8]struct{}, len(keys))
	for _, k := range keys {
		lockIndexes[LockIndexForKey(k)] = struct{}{}
	}

	locksToReturn := make([]*LockEntry, 0, len(keys))
	for i, l := range locks {
		if _, ok := lockIndexes[uint8(i)]; ok {
			locksToReturn = append(locksToReturn, l)
		}
	}

	return locksToReturn
}
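// Illustrative usage sketch (not part of the vendored diff): the 256 locks are
// created once, and each key maps to a fixed lock via the first byte of its
// Blake2b-256 hash, so operations on the same key serialize on the same mutex.
// The key string is a placeholder.
//
//	locks := locksutil.CreateLocks()
//	l := locksutil.LockForKey(locks, "my/storage/key")
//	l.Lock()
//	defer l.Unlock()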
80
vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
package logging

import (
	"fmt"
	"io"
	"os"
	"strings"

	log "github.com/hashicorp/go-hclog"
)

type LogFormat int

const (
	UnspecifiedFormat LogFormat = iota
	StandardFormat
	JSONFormat
)

// Stringer implementation
func (l LogFormat) String() string {
	switch l {
	case UnspecifiedFormat:
		return "unspecified"
	case StandardFormat:
		return "standard"
	case JSONFormat:
		return "json"
	}

	// unreachable
	return "unknown"
}

// NewVaultLogger creates a new logger with the specified level and a Vault
// formatter
func NewVaultLogger(level log.Level) log.Logger {
	return NewVaultLoggerWithWriter(log.DefaultOutput, level)
}

// NewVaultLoggerWithWriter creates a new logger with the specified level and
// writer and a Vault formatter
func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger {
	opts := &log.LoggerOptions{
		Level:      level,
		Output:     w,
		JSONFormat: ParseEnvLogFormat() == JSONFormat,
	}
	return log.New(opts)
}

// ParseLogFormat parses the log format from the provided string.
func ParseLogFormat(format string) (LogFormat, error) {
	switch strings.ToLower(strings.TrimSpace(format)) {
	case "":
		return UnspecifiedFormat, nil
	case "standard":
		return StandardFormat, nil
	case "json":
		return JSONFormat, nil
	default:
		return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format)
	}
}

// ParseEnvLogFormat parses the log format from an environment variable.
func ParseEnvLogFormat() LogFormat {
	logFormat := os.Getenv("VAULT_LOG_FORMAT")
	if logFormat == "" {
		logFormat = os.Getenv("LOGXI_FORMAT")
	}
	switch strings.ToLower(logFormat) {
	case "json", "vault_json", "vault-json", "vaultjson":
		return JSONFormat
	case "standard":
		return StandardFormat
	default:
		return UnspecifiedFormat
	}
}
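// Illustrative usage sketch (not part of the vendored diff): creating a
// debug-level logger whose output format is chosen from VAULT_LOG_FORMAT
// (falling back to LOGXI_FORMAT) via ParseEnvLogFormat above.
//
//	logger := logging.NewVaultLogger(hclog.Debug)
//	logger.Info("plugin starting", "format", logging.ParseEnvLogFormat().String())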
15
vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package mlock

// This should be set by the OS-specific packages to tell whether LockMemory
// is supported or not.
var supported bool

// Supported returns true if LockMemory is functional on this system.
func Supported() bool {
	return supported
}

// LockMemory prevents any memory from being swapped to disk.
func LockMemory() error {
	return lockMemory()
}
13
vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unavail.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// +build android darwin nacl netbsd plan9 windows

package mlock

func init() {
	supported = false
}

func lockMemory() error {
	// XXX: No good way to do this on Windows. There is the VirtualLock
	// method, but it requires a specific address and offset.
	return nil
}
18
vendor/github.com/hashicorp/vault/sdk/helper/mlock/mlock_unix.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// +build dragonfly freebsd linux openbsd solaris

package mlock

import (
	"syscall"

	"golang.org/x/sys/unix"
)

func init() {
	supported = true
}

func lockMemory() error {
	// Mlockall prevents all current and future pages from being swapped out.
	return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE)
}
309
vendor/github.com/hashicorp/vault/sdk/helper/parseutil/parseutil.go
generated
vendored
@ -1,309 +0,0 @@
package parseutil

import (
	"encoding/json"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/errwrap"
	sockaddr "github.com/hashicorp/go-sockaddr"
	"github.com/hashicorp/vault/sdk/helper/strutil"
	"github.com/mitchellh/mapstructure"
)

var validCapacityString = regexp.MustCompile("^[\t ]*([0-9]+)[\t ]?([kmgtKMGT][iI]?[bB])?[\t ]*$")

// ParseCapacityString parses a capacity string and returns the number of bytes it represents.
// Capacity strings are things like 5gib or 10MB. Supported prefixes are kb, kib, mb, mib, gb,
// gib, tb, tib, which are not case sensitive. If no prefix is present, the number is assumed
// to be in bytes already.
func ParseCapacityString(in interface{}) (uint64, error) {
	var cap uint64

	jsonIn, ok := in.(json.Number)
	if ok {
		in = jsonIn.String()
	}

	switch inp := in.(type) {
	case nil:
		// return default of zero
	case string:
		if inp == "" {
			return cap, nil
		}

		matches := validCapacityString.FindStringSubmatch(inp)

		// no sub-groups means we couldn't parse it
		if len(matches) <= 1 {
			return cap, errors.New("could not parse capacity from input")
		}

		var multiplier uint64 = 1
		switch strings.ToLower(matches[2]) {
		case "kb":
			multiplier = 1000
		case "kib":
			multiplier = 1024
		case "mb":
			multiplier = 1000 * 1000
		case "mib":
			multiplier = 1024 * 1024
		case "gb":
			multiplier = 1000 * 1000 * 1000
		case "gib":
			multiplier = 1024 * 1024 * 1024
		case "tb":
			multiplier = 1000 * 1000 * 1000 * 1000
		case "tib":
			multiplier = 1024 * 1024 * 1024 * 1024
		}

		size, err := strconv.ParseUint(matches[1], 10, 64)
		if err != nil {
			return cap, err
		}

		cap = size * multiplier
	case int:
		cap = uint64(inp)
	case int32:
		cap = uint64(inp)
	case int64:
		cap = uint64(inp)
	case uint:
		cap = uint64(inp)
	case uint32:
		cap = uint64(inp)
	case uint64:
		cap = uint64(inp)
	case float32:
		cap = uint64(inp)
	case float64:
		cap = uint64(inp)
	default:
		return cap, errors.New("could not parse capacity from input")
	}

	return cap, nil
}

func ParseDurationSecond(in interface{}) (time.Duration, error) {
	var dur time.Duration
	jsonIn, ok := in.(json.Number)
	if ok {
		in = jsonIn.String()
	}
	switch inp := in.(type) {
	case nil:
		// return default of zero
	case string:
		if inp == "" {
			return dur, nil
		}
		var err error
		// Look for a suffix otherwise its a plain second value
		if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") || strings.HasSuffix(inp, "ms") {
			dur, err = time.ParseDuration(inp)
			if err != nil {
				return dur, err
			}
		} else {
			// Plain integer
			secs, err := strconv.ParseInt(inp, 10, 64)
			if err != nil {
				return dur, err
			}
			dur = time.Duration(secs) * time.Second
		}
	case int:
		dur = time.Duration(inp) * time.Second
	case int32:
		dur = time.Duration(inp) * time.Second
	case int64:
		dur = time.Duration(inp) * time.Second
	case uint:
		dur = time.Duration(inp) * time.Second
	case uint32:
		dur = time.Duration(inp) * time.Second
	case uint64:
		dur = time.Duration(inp) * time.Second
	case float32:
		dur = time.Duration(inp) * time.Second
	case float64:
		dur = time.Duration(inp) * time.Second
	case time.Duration:
		dur = inp
	default:
		return 0, errors.New("could not parse duration from input")
	}

	return dur, nil
}

func ParseAbsoluteTime(in interface{}) (time.Time, error) {
	var t time.Time
	switch inp := in.(type) {
	case nil:
		// return default of zero
		return t, nil
	case string:
		// Allow RFC3339 with nanoseconds, or without,
		// or an epoch time as an integer.
		var err error
		t, err = time.Parse(time.RFC3339Nano, inp)
		if err == nil {
			break
		}
		t, err = time.Parse(time.RFC3339, inp)
		if err == nil {
			break
		}
		epochTime, err := strconv.ParseInt(inp, 10, 64)
		if err == nil {
			t = time.Unix(epochTime, 0)
			break
		}
		return t, errors.New("could not parse string as date and time")
	case json.Number:
		epochTime, err := inp.Int64()
		if err != nil {
			return t, err
		}
		t = time.Unix(epochTime, 0)
	case int:
		t = time.Unix(int64(inp), 0)
	case int32:
		t = time.Unix(int64(inp), 0)
	case int64:
		t = time.Unix(inp, 0)
	case uint:
		t = time.Unix(int64(inp), 0)
	case uint32:
		t = time.Unix(int64(inp), 0)
	case uint64:
		t = time.Unix(int64(inp), 0)
	default:
		return t, errors.New("could not parse time from input type")
	}
	return t, nil
}

func ParseInt(in interface{}) (int64, error) {
	var ret int64
	jsonIn, ok := in.(json.Number)
	if ok {
		in = jsonIn.String()
	}
	switch in.(type) {
	case string:
		inp := in.(string)
		if inp == "" {
			return 0, nil
		}
		var err error
		left, err := strconv.ParseInt(inp, 10, 64)
		if err != nil {
			return ret, err
		}
		ret = left
	case int:
		ret = int64(in.(int))
	case int32:
		ret = int64(in.(int32))
	case int64:
		ret = in.(int64)
	case uint:
		ret = int64(in.(uint))
	case uint32:
		ret = int64(in.(uint32))
	case uint64:
		ret = int64(in.(uint64))
	default:
		return 0, errors.New("could not parse value from input")
	}

	return ret, nil
}

func ParseBool(in interface{}) (bool, error) {
	var result bool
	if err := mapstructure.WeakDecode(in, &result); err != nil {
		return false, err
	}
	return result, nil
}

func ParseString(in interface{}) (string, error) {
	var result string
	if err := mapstructure.WeakDecode(in, &result); err != nil {
		return "", err
	}
	return result, nil
}

func ParseCommaStringSlice(in interface{}) ([]string, error) {
	rawString, ok := in.(string)
	if ok && rawString == "" {
		return []string{}, nil
	}
	var result []string
	config := &mapstructure.DecoderConfig{
		Result:           &result,
		WeaklyTypedInput: true,
		DecodeHook:       mapstructure.StringToSliceHookFunc(","),
	}
	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		return nil, err
	}
	if err := decoder.Decode(in); err != nil {
		return nil, err
	}
	return strutil.TrimStrings(result), nil
}

func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) {
	out := make([]*sockaddr.SockAddrMarshaler, 0)
	stringAddrs := make([]string, 0)

	switch addrs.(type) {
	case string:
		stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",")
		if len(stringAddrs) == 0 {
			return nil, fmt.Errorf("unable to parse addresses from %v", addrs)
		}

	case []string:
		stringAddrs = addrs.([]string)

	case []interface{}:
		for _, v := range addrs.([]interface{}) {
			stringAddr, ok := v.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing %v as string", v)
			}
			stringAddrs = append(stringAddrs, stringAddr)
		}

	default:
		return nil, fmt.Errorf("unknown address input type %T", addrs)
	}

	for _, addr := range stringAddrs {
		sa, err := sockaddr.NewSockAddr(addr)
		if err != nil {
			return nil, errwrap.Wrapf(fmt.Sprintf("error parsing address %q: {{err}}", addr), err)
		}
		out = append(out, &sockaddr.SockAddrMarshaler{
			SockAddr: sa,
		})
	}

	return out, nil
}
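// Note (not part of the vendored diff): this sdk/helper/parseutil file is being
// removed here; the equivalent helpers are presumably provided by the
// github.com/hashicorp/go-secure-stdlib/parseutil module vendored with this
// bump. Illustrative sketch of the same helpers, assuming that import path:
//
//	dur, err := parseutil.ParseDurationSecond("30")    // 30 * time.Second
//	size, err := parseutil.ParseCapacityString("5gib") // 5 * 1024 * 1024 * 1024 bytes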
136
vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
package pathmanager

import (
	"strings"
	"sync"

	iradix "github.com/hashicorp/go-immutable-radix"
)

// PathManager is a prefix searchable index of paths
type PathManager struct {
	l     sync.RWMutex
	paths *iradix.Tree
}

// New creates a new path manager
func New() *PathManager {
	return &PathManager{
		paths: iradix.New(),
	}
}

// AddPaths adds path to the paths list
func (p *PathManager) AddPaths(paths []string) {
	p.l.Lock()
	defer p.l.Unlock()

	txn := p.paths.Txn()
	for _, prefix := range paths {
		if len(prefix) == 0 {
			continue
		}

		var exception bool
		if strings.HasPrefix(prefix, "!") {
			prefix = strings.TrimPrefix(prefix, "!")
			exception = true
		}

		// We trim any trailing *, but we don't touch whether it is a trailing
		// slash or not since we want to be able to ignore prefixes that fully
		// specify a file
		txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception)
	}
	p.paths = txn.Commit()
}

// RemovePaths removes paths from the paths list
func (p *PathManager) RemovePaths(paths []string) {
	p.l.Lock()
	defer p.l.Unlock()

	txn := p.paths.Txn()
	for _, prefix := range paths {
		if len(prefix) == 0 {
			continue
		}

		// Exceptions aren't stored with the leading ! so strip it
		if strings.HasPrefix(prefix, "!") {
			prefix = strings.TrimPrefix(prefix, "!")
		}

		// We trim any trailing *, but we don't touch whether it is a trailing
		// slash or not since we want to be able to ignore prefixes that fully
		// specify a file
		txn.Delete([]byte(strings.TrimSuffix(prefix, "*")))
	}
	p.paths = txn.Commit()
}

// RemovePathPrefix removes all paths with the given prefix
func (p *PathManager) RemovePathPrefix(prefix string) {
	p.l.Lock()
	defer p.l.Unlock()

	// We trim any trailing *, but we don't touch whether it is a trailing
	// slash or not since we want to be able to ignore prefixes that fully
	// specify a file
	p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*")))
}

// Len returns the number of paths
func (p *PathManager) Len() int {
	return p.paths.Len()
}

// Paths returns the path list
func (p *PathManager) Paths() []string {
	p.l.RLock()
	defer p.l.RUnlock()

	paths := make([]string, 0, p.paths.Len())
	walkFn := func(k []byte, v interface{}) bool {
		paths = append(paths, string(k))
		return false
	}
	p.paths.Root().Walk(walkFn)
	return paths
}

// HasPath returns if the prefix for the path exists regardless if it is a path
// (ending with /) or a prefix for a leaf node
func (p *PathManager) HasPath(path string) bool {
	p.l.RLock()
	defer p.l.RUnlock()

	if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok {
		var exception bool
		if exceptionRaw != nil {
			exception = exceptionRaw.(bool)
		}
		return !exception
	}
	return false
}

// HasExactPath returns if the longest match is an exact match for the
// full path
func (p *PathManager) HasExactPath(path string) bool {
	p.l.RLock()
	defer p.l.RUnlock()

	if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok {
		var exception bool
		if exceptionRaw != nil {
			exception = exceptionRaw.(bool)
		}

		strVal := string(val)
		if strings.HasSuffix(strVal, "/") || strVal == path {
			return !exception
		}
	}
	return false
}
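// Illustrative usage sketch (not part of the vendored diff): a trailing "*" is
// trimmed so the entry matches as a prefix, and a leading "!" marks an
// exception, as handled by AddPaths above. The paths are placeholders.
//
//	pm := pathmanager.New()
//	pm.AddPaths([]string{"secret/*", "!secret/internal/*"})
//	pm.HasPath("secret/foo")          // true: matches the "secret/" prefix
//	pm.HasPath("secret/internal/key") // false: longest match is the exception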
69
vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
package pluginutil

import (
	"os"

	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/vault/sdk/helper/mlock"
)

var (
	// PluginMlockEnabled is the ENV name used to pass the configuration for
	// enabling mlock
	PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED"

	// PluginVaultVersionEnv is the ENV name used to pass the version of the
	// vault server to the plugin
	PluginVaultVersionEnv = "VAULT_VERSION"

	// PluginMetadataModeEnv is an ENV name used to disable TLS communication
	// to bootstrap mounting plugins.
	PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE"

	// PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the
	// plugin.
	PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN"

	// PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded
	// string. Used for testing.
	PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM"
)

// OptionallyEnableMlock determines if mlock should be called, and if so enables
// mlock.
func OptionallyEnableMlock() error {
	if os.Getenv(PluginMlockEnabled) == "true" {
		return mlock.LockMemory()
	}

	return nil
}

// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or
// it fails to meet the version constraint.
func GRPCSupport() bool {
	verString := os.Getenv(PluginVaultVersionEnv)
	// If the env var is empty, we fall back to netrpc for backward compatibility.
	if verString == "" {
		return false
	}
	if verString != "unknown" {
		ver, err := version.NewVersion(verString)
		if err != nil {
			return true
		}
		// Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4
		// to allow the plugin framework to default to gRPC.
		constraint, err := version.NewConstraint(">= 0.9.4")
		if err != nil {
			return true
		}
		return constraint.Check(ver)
	}
	return true
}

// InMetadataMode returns true if the plugin calling this function is running in metadata mode.
func InMetadataMode() bool {
	return os.Getenv(PluginMetadataModeEnv) == "true"
}
161
vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
package pluginutil

import (
	"context"
	"crypto/sha256"
	"crypto/tls"
	"fmt"
	"os/exec"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-plugin"
	"github.com/hashicorp/vault/sdk/version"
)

type runConfig struct {
	// Provided by PluginRunner
	command string
	args    []string
	sha256  []byte

	// Initialized with what's in PluginRunner.Env, but can be added to
	env []string

	wrapper        RunnerUtil
	pluginSets     map[int]plugin.PluginSet
	hs             plugin.HandshakeConfig
	logger         log.Logger
	isMetadataMode bool
	autoMTLS       bool
}

func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) {
	cmd := exec.Command(rc.command, rc.args...)
	cmd.Env = append(cmd.Env, rc.env...)

	// Add the mlock setting to the ENV of the plugin
	if rc.wrapper != nil && rc.wrapper.MlockEnabled() {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
	}
	cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version))

	if rc.isMetadataMode {
		rc.logger = rc.logger.With("metadata", "true")
	}
	metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.isMetadataMode)
	cmd.Env = append(cmd.Env, metadataEnv)

	var clientTLSConfig *tls.Config
	if !rc.autoMTLS && !rc.isMetadataMode {
		// Get a CA TLS Certificate
		certBytes, key, err := generateCert()
		if err != nil {
			return nil, err
		}

		// Use CA to sign a client cert and return a configured TLS config
		clientTLSConfig, err = createClientTLSConfig(certBytes, key)
		if err != nil {
			return nil, err
		}

		// Use CA to sign a server cert and wrap the values in a response wrapped
		// token.
		wrapToken, err := wrapServerConfig(ctx, rc.wrapper, certBytes, key)
		if err != nil {
			return nil, err
		}

		// Add the response wrap token to the ENV of the plugin
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken))
	}

	secureConfig := &plugin.SecureConfig{
		Checksum: rc.sha256,
		Hash:     sha256.New(),
	}

	clientConfig := &plugin.ClientConfig{
		HandshakeConfig:  rc.hs,
		VersionedPlugins: rc.pluginSets,
		Cmd:              cmd,
		SecureConfig:     secureConfig,
		TLSConfig:        clientTLSConfig,
		Logger:           rc.logger,
		AllowedProtocols: []plugin.Protocol{
			plugin.ProtocolNetRPC,
			plugin.ProtocolGRPC,
		},
		AutoMTLS: rc.autoMTLS,
	}
	return clientConfig, nil
}

func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) {
	clientConfig, err := rc.makeConfig(ctx)
	if err != nil {
		return nil, err
	}

	client := plugin.NewClient(clientConfig)
	return client, nil
}

type RunOpt func(*runConfig)

func Env(env ...string) RunOpt {
	return func(rc *runConfig) {
		rc.env = append(rc.env, env...)
	}
}

func Runner(wrapper RunnerUtil) RunOpt {
	return func(rc *runConfig) {
		rc.wrapper = wrapper
	}
}

func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt {
	return func(rc *runConfig) {
		rc.pluginSets = pluginSets
	}
}

func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt {
	return func(rc *runConfig) {
		rc.hs = hs
	}
}

func Logger(logger log.Logger) RunOpt {
	return func(rc *runConfig) {
		rc.logger = logger
	}
}

func MetadataMode(isMetadataMode bool) RunOpt {
	return func(rc *runConfig) {
		rc.isMetadataMode = isMetadataMode
	}
}

func AutoMTLS(autoMTLS bool) RunOpt {
	return func(rc *runConfig) {
		rc.autoMTLS = autoMTLS
	}
}

func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) {
	rc := runConfig{
		command: r.Command,
		args:    r.Args,
		sha256:  r.Sha256,
		env:     r.Env,
	}

	for _, opt := range opts {
		opt(&rc)
	}

	return rc.run(ctx)
}
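// Illustrative usage sketch (not part of the vendored diff): RunConfig above
// assembles the runConfig through functional options, so callers compose only
// the settings they need. The variable names here are placeholders.
//
//	client, err := pluginRunner.RunConfig(ctx,
//		pluginutil.Runner(sysView),
//		pluginutil.PluginSets(pluginSets),
//		pluginutil.HandshakeConfig(handshake),
//		pluginutil.Logger(logger),
//		pluginutil.MetadataMode(false),
//	)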
88
vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
package pluginutil

import (
	"context"
	"time"

	log "github.com/hashicorp/go-hclog"
	plugin "github.com/hashicorp/go-plugin"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/wrapping"
)

// Looker defines the plugin Lookup function that looks into the plugin catalog
// for available plugins and returns a PluginRunner
type Looker interface {
	LookupPlugin(context.Context, string, consts.PluginType) (*PluginRunner, error)
}

// RunnerUtil interface defines the functions needed by the runner to wrap the
// metadata needed to run a plugin process. This includes looking up Mlock
// configuration and wrapping data in a response wrapped token.
// logical.SystemView implementations satisfy this interface.
type RunnerUtil interface {
	ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
	MlockEnabled() bool
}

// LookRunnerUtil defines the functions for both Looker and Wrapper
type LookRunnerUtil interface {
	Looker
	RunnerUtil
}

// PluginRunner defines the metadata needed to run a plugin securely with
// go-plugin.
type PluginRunner struct {
	Name           string                      `json:"name" structs:"name"`
	Type           consts.PluginType           `json:"type" structs:"type"`
	Command        string                      `json:"command" structs:"command"`
	Args           []string                    `json:"args" structs:"args"`
	Env            []string                    `json:"env" structs:"env"`
	Sha256         []byte                      `json:"sha256" structs:"sha256"`
	Builtin        bool                        `json:"builtin" structs:"builtin"`
	BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"`
}

// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and
// returns a configured plugin.Client with TLS Configured and a wrapping token set
// on PluginUnwrapTokenEnv for plugin process consumption.
func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
	return r.RunConfig(ctx,
		Runner(wrapper),
		PluginSets(pluginSets),
		HandshakeConfig(hs),
		Env(env...),
		Logger(logger),
		MetadataMode(false),
	)
}

// RunMetadataMode returns a configured plugin.Client that will dispense a plugin
// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to
// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider.
func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
	return r.RunConfig(ctx,
		Runner(wrapper),
		PluginSets(pluginSets),
		HandshakeConfig(hs),
		Env(env...),
		Logger(logger),
		MetadataMode(true),
	)
}

// CtxCancelIfCanceled takes a context cancel func and a context. If the context is
// shutdown the cancelfunc is called. This is useful for merging two cancel
// functions.
func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} {
	quitCh := make(chan struct{})
	go func() {
		select {
		case <-quitCh:
		case <-ctxCanceler.Done():
			f()
		}
	}()
	return quitCh
}
108
vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package pluginutil

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"time"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/sdk/helper/certutil"
)

// generateCert is used internally to create certificates for the plugin
// client and server.
func generateCert() ([]byte, *ecdsa.PrivateKey, error) {
	key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}

	host, err := uuid.GenerateUUID()
	if err != nil {
		return nil, nil, err
	}

	sn, err := certutil.GenerateSerialNumber()
	if err != nil {
		return nil, nil, err
	}

	template := &x509.Certificate{
		Subject: pkix.Name{
			CommonName: host,
		},
		DNSNames: []string{host},
		ExtKeyUsage: []x509.ExtKeyUsage{
			x509.ExtKeyUsageClientAuth,
			x509.ExtKeyUsageServerAuth,
		},
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
		SerialNumber: sn,
		NotBefore:    time.Now().Add(-30 * time.Second),
		NotAfter:     time.Now().Add(262980 * time.Hour),
		IsCA:         true,
	}

	certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
	if err != nil {
		return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err)
	}

	return certBytes, key, nil
}

// createClientTLSConfig creates a signed certificate and returns a configured
// TLS config.
func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) {
	clientCert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err)
	}

	cert := tls.Certificate{
		Certificate: [][]byte{certBytes},
		PrivateKey:  key,
		Leaf:        clientCert,
	}

	clientCertPool := x509.NewCertPool()
	clientCertPool.AddCert(clientCert)

	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      clientCertPool,
		ClientCAs:    clientCertPool,
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ServerName:   clientCert.Subject.CommonName,
		MinVersion:   tls.VersionTLS12,
	}

	tlsConfig.BuildNameToCertificate()

	return tlsConfig, nil
}

// wrapServerConfig is used to create a server certificate and private key, then
// wrap them in an unwrap token for later retrieval by the plugin.
func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) {
	rawKey, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return "", err
	}

	wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{
		"ServerCert": certBytes,
		"ServerKey":  rawKey,
	}, time.Second*60, true)
	if err != nil {
		return "", err
	}

	return wrapInfo.Token, nil
}
37
vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
package wrapping
|
||||
|
||||
import "time"
|
||||
|
||||
type ResponseWrapInfo struct {
|
||||
// Setting to non-zero specifies that the response should be wrapped.
|
||||
// Specifies the desired TTL of the wrapping token.
|
||||
TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""`
|
||||
|
||||
// The token containing the wrapped response
|
||||
Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""`
|
||||
|
||||
// The token accessor for the wrapped response token
|
||||
Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"`
|
||||
|
||||
// The creation time. This can be used with the TTL to figure out an
|
||||
// expected expiration.
|
||||
CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""`
|
||||
|
||||
// If the contained response is the output of a token creation call, the
|
||||
// created token's accessor will be accessible here
|
||||
WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""`
|
||||
|
||||
// WrappedEntityID is the entity identifier of the caller who initiated the
|
||||
// wrapping request
|
||||
WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""`
|
||||
|
||||
// The format to use. This doesn't get returned, it's only internal.
|
||||
Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""`
|
||||
|
||||
// CreationPath is the original request path that was used to create
|
||||
// the wrapped response.
|
||||
CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""`
|
||||
|
||||
// Controls seal wrapping behavior downstream for specific use cases
|
||||
SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""`
|
||||
}
|
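As the comment on CreationTime notes, the creation time and TTL together determine when a wrapping token expires. Below is a minimal illustrative helper, not part of the vendored package, that derives the expected expiry from those two fields (it assumes it lives alongside the struct above, so the existing "time" import applies).

// wrapTokenExpiry is an illustrative helper, not part of the vendored code:
// it combines CreationTime and TTL to estimate when the wrapping token expires.
func wrapTokenExpiry(info *ResponseWrapInfo) time.Time {
	if info == nil || info.TTL <= 0 {
		// A zero TTL means the response was not wrapped.
		return time.Time{}
	}
	return info.CreationTime.Add(info.TTL)
}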
19 vendor/github.com/hashicorp/vault/sdk/logical/audit.go generated vendored Normal file
@ -0,0 +1,19 @@
package logical

type LogInput struct {
	Type                string
	Auth                *Auth
	Request             *Request
	Response            *Response
	OuterErr            error
	NonHMACReqDataKeys  []string
	NonHMACRespDataKeys []string
}

type MarshalOptions struct {
	ValueHasher func(string) string
}

type OptMarshaler interface {
	MarshalJSONWithOptions(*MarshalOptions) ([]byte, error)
}
107 vendor/github.com/hashicorp/vault/sdk/logical/auth.go generated vendored Normal file
@ -0,0 +1,107 @@
package logical

import (
	"fmt"
	"time"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

// Auth is the resulting authentication information that is part of
// Response for credential backends.
type Auth struct {
	LeaseOptions

	// InternalData is JSON-encodable data that is stored with the auth struct.
	// This will be sent back during a Renew/Revoke for storing internal data
	// used for those operations.
	InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"`

	// DisplayName is a non-security sensitive identifier that is
	// applicable to this Auth. It is used for logging and prefixing
	// of dynamic secrets. For example, DisplayName may be "armon" for
	// the github credential backend. If the client token is used to
	// generate a SQL credential, the user may be "github-armon-uuid".
	// This is to help identify the source without using audit tables.
	DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`

	// Policies is the list of policies that the authenticated user
	// is associated with.
	Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`

	// TokenPolicies and IdentityPolicies break down the list in Policies to
	// help determine where a policy was sourced
	TokenPolicies    []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"`
	IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"`

	// ExternalNamespacePolicies represent the policies authorized from
	// different namespaces indexed by respective namespace identifiers
	ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"`

	// Indicates that the default policy should not be added by core when
	// creating a token. The default policy will still be added if it's
	// explicitly defined.
	NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"`

	// Metadata is used to attach arbitrary string-type metadata to
	// an authenticated user. This metadata will be outputted into the
	// audit log.
	Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"`

	// ClientToken is the token that is generated for the authentication.
	// This will be filled in by Vault core when an auth structure is
	// returned. Setting this manually will have no effect.
	ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"`

	// Accessor is the identifier for the ClientToken. This can be used
	// to perform management functionalities (especially revocation) when
	// ClientToken in the audit logs are obfuscated. Accessor can be used
	// to revoke a ClientToken and to lookup the capabilities of the ClientToken,
	// both without actually knowing the ClientToken.
	Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`

	// Period indicates that the token generated using this Auth object
	// should never expire. The token should be renewed within the duration
	// specified by this period.
	Period time.Duration `json:"period" mapstructure:"period" structs:"period"`

	// ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal
	// tokens, this value is constrained by the configured max ttl.
	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`

	// Number of allowed uses of the issued token
	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`

	// EntityID is the identifier of the entity in the identity store to which
	// the identity of the authenticating client belongs.
	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`

	// Alias is the information about the authenticated client returned by
	// the auth backend
	Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"`

	// GroupAliases are the informational mappings of external groups which an
	// authenticated user belongs to. This is used to check if there are
	// mapped groups for the group aliases in the identity store. For all the
	// matching groups, the entity ID of the user will be added.
	GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"`

	// The set of CIDRs that this token can be used with
	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`

	// CreationPath is a path that the backend can return to use in the lease.
	// This is currently only supported for the token store where roles may
	// change the perceived path of the lease, even though they don't change
	// the request path itself.
	CreationPath string `json:"creation_path"`

	// TokenType is the type of token being requested
	TokenType TokenType `json:"token_type"`

	// Orphan is set if the token does not have a parent
	Orphan bool `json:"orphan"`
}

func (a *Auth) GoString() string {
	return fmt.Sprintf("*%#v", *a)
}
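A hedged sketch of how a credential backend might fill in this struct when returning a successful login; the function name and all concrete values below are placeholders for illustration, and only fields defined above (plus the embedded LeaseOptions from lease.go later in this diff) are used.

package example // illustrative scratch package, not part of the diff

import (
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

// exampleAuth builds a logical.Auth the way a login handler might; the
// policies, metadata and TTL here are made-up placeholder values.
func exampleAuth() *logical.Auth {
	return &logical.Auth{
		LeaseOptions: logical.LeaseOptions{
			TTL:       30 * time.Minute,
			Renewable: true,
		},
		DisplayName: "example-user",
		Policies:    []string{"default", "example-policy"},
		Metadata:    map[string]string{"username": "example-user"},
	}
}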
15 vendor/github.com/hashicorp/vault/sdk/logical/connection.go generated vendored Normal file
@ -0,0 +1,15 @@
package logical

import (
	"crypto/tls"
)

// Connection represents the connection information for a request. This
// is present on the Request structure for credential backends.
type Connection struct {
	// RemoteAddr is the network address that sent the request.
	RemoteAddr string `json:"remote_addr"`

	// ConnState is the TLS connection state if applicable.
	ConnState *tls.ConnectionState `sentinel:""`
}
17 vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go generated vendored Normal file
@ -0,0 +1,17 @@
package logical

import (
	"time"
)

type ControlGroup struct {
	Authorizations []*Authz  `json:"authorizations"`
	RequestTime    time.Time `json:"request_time"`
	Approved       bool      `json:"approved"`
	NamespaceID    string    `json:"namespace_id"`
}

type Authz struct {
	Token             string    `json:"token"`
	AuthorizationTime time.Time `json:"authorization_time"`
}
113 vendor/github.com/hashicorp/vault/sdk/logical/error.go generated vendored Normal file
@ -0,0 +1,113 @@
package logical

import "errors"

var (
	// ErrUnsupportedOperation is returned if the operation is not supported
	// by the logical backend.
	ErrUnsupportedOperation = errors.New("unsupported operation")

	// ErrUnsupportedPath is returned if the path is not supported
	// by the logical backend.
	ErrUnsupportedPath = errors.New("unsupported path")

	// ErrInvalidRequest is returned if the request is invalid
	ErrInvalidRequest = errors.New("invalid request")

	// ErrPermissionDenied is returned if the client is not authorized
	ErrPermissionDenied = errors.New("permission denied")

	// ErrMultiAuthzPending is returned if the request needs more
	// authorizations
	ErrMultiAuthzPending = errors.New("request needs further approval")

	// ErrUpstreamRateLimited is returned when Vault receives a rate limited
	// response from an upstream
	ErrUpstreamRateLimited = errors.New("upstream rate limited")

	// ErrPerfStandbyPleaseForward is returned when Vault is in a state such that a
	// perf standby cannot satisfy a request
	ErrPerfStandbyPleaseForward = errors.New("please forward to the active node")

	// ErrLeaseCountQuotaExceeded is returned when a request is rejected due to a lease
	// count quota being exceeded.
	ErrLeaseCountQuotaExceeded = errors.New("lease count quota exceeded")

	// ErrRateLimitQuotaExceeded is returned when a request is rejected due to a
	// rate limit quota being exceeded.
	ErrRateLimitQuotaExceeded = errors.New("rate limit quota exceeded")

	// ErrUnrecoverable is returned when a request fails due to something that
	// is likely to require manual intervention. This is a generic form of an
	// unrecoverable error.
	// e.g.: misconfigured or disconnected storage backend.
	ErrUnrecoverable = errors.New("unrecoverable error")

	// ErrMissingRequiredState is returned when a request can't be satisfied
	// with the data in the local node's storage, based on the provided
	// X-Vault-Index request header.
	ErrMissingRequiredState = errors.New("required index state not present")
)

type HTTPCodedError interface {
	Error() string
	Code() int
}

func CodedError(status int, msg string) HTTPCodedError {
	return &codedError{
		Status:  status,
		Message: msg,
	}
}

var _ HTTPCodedError = (*codedError)(nil)

type codedError struct {
	Status  int
	Message string
}

func (e *codedError) Error() string {
	return e.Message
}

func (e *codedError) Code() int {
	return e.Status
}

// Struct to identify user input errors. This is helpful in responding with the
// appropriate status codes to clients from the HTTP endpoints.
type StatusBadRequest struct {
	Err string
}

// Implementing error interface
func (s *StatusBadRequest) Error() string {
	return s.Err
}

// This is a new type declared to not cause potential compatibility problems if
// the logic around the CodedError changes; in particular for logical request
// paths it is basically ignored, and changing that behavior might cause
// unforeseen issues.
type ReplicationCodedError struct {
	Msg  string
	Code int
}

func (r *ReplicationCodedError) Error() string {
	return r.Msg
}

type KeyNotFoundError struct {
	Err error
}

func (e *KeyNotFoundError) WrappedErrors() []error {
	return []error{e.Err}
}

func (e *KeyNotFoundError) Error() string {
	return e.Err.Error()
}
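CodedError is the usual way for code in this package's callers to attach an HTTP status to an error, and the status can be recovered through the HTTPCodedError interface. The following is a small illustrative sketch, not taken from the vendored code; the function name and message are placeholders.

package example // illustrative scratch package, not part of the diff

import (
	"net/http"

	"github.com/hashicorp/vault/sdk/logical"
)

// describeError shows the round trip: build a coded error, then read the
// message and status back out via the HTTPCodedError interface.
func describeError() (string, int) {
	var err error = logical.CodedError(http.StatusBadRequest, "missing required field 'name'")

	if coded, ok := err.(logical.HTTPCodedError); ok {
		return coded.Error(), coded.Code()
	}
	return err.Error(), http.StatusInternalServerError
}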
449 vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go generated vendored Normal file
@ -0,0 +1,449 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.15.8
|
||||
// source: sdk/logical/identity.proto
|
||||
|
||||
package logical
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type Entity struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// ID is the unique identifier for the entity
|
||||
ID string `sentinel:"" protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
// Name is the human-friendly unique identifier for the entity
|
||||
Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Aliases contains thhe alias mappings for the given entity
|
||||
Aliases []*Alias `sentinel:"" protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
|
||||
// Metadata represents the custom data tied to this entity
|
||||
Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Disabled is true if the entity is disabled.
|
||||
Disabled bool `sentinel:"" protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"`
|
||||
// NamespaceID is the identifier of the namespace to which this entity
|
||||
// belongs to.
|
||||
NamespaceID string `sentinel:"" protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Entity) Reset() {
|
||||
*x = Entity{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Entity) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Entity) ProtoMessage() {}
|
||||
|
||||
func (x *Entity) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Entity.ProtoReflect.Descriptor instead.
|
||||
func (*Entity) Descriptor() ([]byte, []int) {
|
||||
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Entity) GetID() string {
|
||||
if x != nil {
|
||||
return x.ID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Entity) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Entity) GetAliases() []*Alias {
|
||||
if x != nil {
|
||||
return x.Aliases
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Entity) GetMetadata() map[string]string {
|
||||
if x != nil {
|
||||
return x.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Entity) GetDisabled() bool {
|
||||
if x != nil {
|
||||
return x.Disabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Entity) GetNamespaceID() string {
|
||||
if x != nil {
|
||||
return x.NamespaceID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Alias struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// MountType is the backend mount's type to which this identity belongs
|
||||
MountType string `sentinel:"" protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
|
||||
// MountAccessor is the identifier of the mount entry to which this
|
||||
// identity belongs
|
||||
MountAccessor string `sentinel:"" protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
|
||||
// Name is the identifier of this identity in its authentication source
|
||||
Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Metadata represents the custom data tied to this alias. Fields added
|
||||
// to it should have a low rate of change (or no change) because each
|
||||
// change incurs a storage write, so quickly-changing fields can have
|
||||
// a significant performance impact at scale. See the SDK's
|
||||
// "aliasmetadata" package for a helper that eases and standardizes
|
||||
// using this safely.
|
||||
Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// ID is the unique identifier for the alias
|
||||
ID string `sentinel:"" protobuf:"bytes,5,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
// NamespaceID is the identifier of the namespace to which this alias
|
||||
// belongs.
|
||||
NamespaceID string `sentinel:"" protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Alias) Reset() {
|
||||
*x = Alias{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Alias) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Alias) ProtoMessage() {}
|
||||
|
||||
func (x *Alias) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Alias.ProtoReflect.Descriptor instead.
|
||||
func (*Alias) Descriptor() ([]byte, []int) {
|
||||
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *Alias) GetMountType() string {
|
||||
if x != nil {
|
||||
return x.MountType
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Alias) GetMountAccessor() string {
|
||||
if x != nil {
|
||||
return x.MountAccessor
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Alias) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Alias) GetMetadata() map[string]string {
|
||||
if x != nil {
|
||||
return x.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Alias) GetID() string {
|
||||
if x != nil {
|
||||
return x.ID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Alias) GetNamespaceID() string {
|
||||
if x != nil {
|
||||
return x.NamespaceID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// ID is the unique identifier for the group
|
||||
ID string `sentinel:"" protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
// Name is the human-friendly unique identifier for the group
|
||||
Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Metadata represents the custom data tied to this group
|
||||
Metadata map[string]string `sentinel:"" protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// NamespaceID is the identifier of the namespace to which this group
|
||||
// belongs to.
|
||||
NamespaceID string `sentinel:"" protobuf:"bytes,4,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Group) Reset() {
|
||||
*x = Group{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Group) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Group) ProtoMessage() {}
|
||||
|
||||
func (x *Group) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sdk_logical_identity_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Group.ProtoReflect.Descriptor instead.
|
||||
func (*Group) Descriptor() ([]byte, []int) {
|
||||
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *Group) GetID() string {
|
||||
if x != nil {
|
||||
return x.ID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetMetadata() map[string]string {
|
||||
if x != nil {
|
||||
return x.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Group) GetNamespaceID() string {
|
||||
if x != nil {
|
||||
return x.NamespaceID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_sdk_logical_identity_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_sdk_logical_identity_proto_rawDesc = []byte{
|
||||
0x0a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64,
|
||||
0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f,
|
||||
0x67, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8d, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79,
|
||||
0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18,
|
||||
0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e,
|
||||
0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x39,
|
||||
0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74,
|
||||
0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
|
||||
0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
|
||||
0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12,
|
||||
0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25,
|
||||
0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63,
|
||||
0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74,
|
||||
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f,
|
||||
0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61,
|
||||
0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64,
|
||||
0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x02, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||
0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
|
||||
0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a,
|
||||
0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
|
||||
0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72,
|
||||
0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
|
||||
0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b,
|
||||
0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
|
||||
0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
|
||||
0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f,
|
||||
0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_sdk_logical_identity_proto_rawDescOnce sync.Once
|
||||
file_sdk_logical_identity_proto_rawDescData = file_sdk_logical_identity_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_sdk_logical_identity_proto_rawDescGZIP() []byte {
|
||||
file_sdk_logical_identity_proto_rawDescOnce.Do(func() {
|
||||
file_sdk_logical_identity_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_identity_proto_rawDescData)
|
||||
})
|
||||
return file_sdk_logical_identity_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_sdk_logical_identity_proto_goTypes = []interface{}{
|
||||
(*Entity)(nil), // 0: logical.Entity
|
||||
(*Alias)(nil), // 1: logical.Alias
|
||||
(*Group)(nil), // 2: logical.Group
|
||||
nil, // 3: logical.Entity.MetadataEntry
|
||||
nil, // 4: logical.Alias.MetadataEntry
|
||||
nil, // 5: logical.Group.MetadataEntry
|
||||
}
|
||||
var file_sdk_logical_identity_proto_depIDxs = []int32{
|
||||
1, // 0: logical.Entity.aliases:type_name -> logical.Alias
|
||||
3, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry
|
||||
4, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry
|
||||
5, // 3: logical.Group.metadata:type_name -> logical.Group.MetadataEntry
|
||||
4, // [4:4] is the sub-list for method output_type
|
||||
4, // [4:4] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_sdk_logical_identity_proto_init() }
|
||||
func file_sdk_logical_identity_proto_init() {
|
||||
if File_sdk_logical_identity_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Entity); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Alias); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Group); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_sdk_logical_identity_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_sdk_logical_identity_proto_goTypes,
|
||||
DependencyIndexes: file_sdk_logical_identity_proto_depIDxs,
|
||||
MessageInfos: file_sdk_logical_identity_proto_msgTypes,
|
||||
}.Build()
|
||||
File_sdk_logical_identity_proto = out.File
|
||||
file_sdk_logical_identity_proto_rawDesc = nil
|
||||
file_sdk_logical_identity_proto_goTypes = nil
|
||||
file_sdk_logical_identity_proto_depIDxs = nil
|
||||
}
|
68 vendor/github.com/hashicorp/vault/sdk/logical/identity.proto generated vendored Normal file
@ -0,0 +1,68 @@
syntax = "proto3";

option go_package = "github.com/hashicorp/vault/sdk/logical";

package logical;

message Entity {
	// ID is the unique identifier for the entity
	string ID = 1;

	// Name is the human-friendly unique identifier for the entity
	string name = 2;

	// Aliases contains the alias mappings for the given entity
	repeated Alias aliases = 3;

	// Metadata represents the custom data tied to this entity
	map<string, string> metadata = 4;

	// Disabled is true if the entity is disabled.
	bool disabled = 5;

	// NamespaceID is the identifier of the namespace to which this entity
	// belongs.
	string namespace_id = 6;
}

message Alias {
	// MountType is the backend mount's type to which this identity belongs
	string mount_type = 1;

	// MountAccessor is the identifier of the mount entry to which this
	// identity belongs
	string mount_accessor = 2;

	// Name is the identifier of this identity in its authentication source
	string name = 3;

	// Metadata represents the custom data tied to this alias. Fields added
	// to it should have a low rate of change (or no change) because each
	// change incurs a storage write, so quickly-changing fields can have
	// a significant performance impact at scale. See the SDK's
	// "aliasmetadata" package for a helper that eases and standardizes
	// using this safely.
	map<string, string> metadata = 4;

	// ID is the unique identifier for the alias
	string ID = 5;

	// NamespaceID is the identifier of the namespace to which this alias
	// belongs.
	string namespace_id = 6;
}

message Group {
	// ID is the unique identifier for the group
	string ID = 1;

	// Name is the human-friendly unique identifier for the group
	string name = 2;

	// Metadata represents the custom data tied to this group
	map<string, string> metadata = 3;

	// NamespaceID is the identifier of the namespace to which this group
	// belongs.
	string namespace_id = 4;
}
53 vendor/github.com/hashicorp/vault/sdk/logical/lease.go generated vendored Normal file
@ -0,0 +1,53 @@
package logical

import (
	"time"
)

// LeaseOptions is an embeddable struct to capture common lease
// settings between a Secret and Auth
type LeaseOptions struct {
	// TTL is the duration that this secret is valid for. Vault
	// will automatically revoke it after the duration.
	TTL time.Duration `json:"lease"`

	// MaxTTL is the maximum duration that this secret is valid for.
	MaxTTL time.Duration `json:"max_ttl"`

	// Renewable, if true, means that this secret can be renewed.
	Renewable bool `json:"renewable"`

	// Increment will be the lease increment that the user requested.
	// This is only available on a Renew operation and has no effect
	// when returning a response.
	Increment time.Duration `json:"-"`

	// IssueTime is the time of issue for the original lease. This is
	// only available on Renew and Revoke operations and has no effect when returning
	// a response. It can be used to enforce maximum lease periods by
	// a logical backend.
	IssueTime time.Time `json:"-"`
}

// LeaseEnabled checks if leasing is enabled
func (l *LeaseOptions) LeaseEnabled() bool {
	return l.TTL > 0
}

// LeaseTotal is the lease duration with a guard against a negative TTL
func (l *LeaseOptions) LeaseTotal() time.Duration {
	if l.TTL <= 0 {
		return 0
	}

	return l.TTL
}

// ExpirationTime computes the time until expiration including the grace period
func (l *LeaseOptions) ExpirationTime() time.Time {
	var expireTime time.Time
	if l.LeaseEnabled() {
		expireTime = time.Now().Add(l.LeaseTotal())
	}
	return expireTime
}
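The helpers above make lease handling mechanical: LeaseEnabled reports whether a TTL was set, and ExpirationTime projects the expiry from the current time. An illustrative usage sketch, not part of the vendored code, follows; the function name and output text are placeholders.

package example // illustrative scratch package, not part of the diff

import (
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

// describeLease summarizes a LeaseOptions value using the helpers defined above.
func describeLease(l *logical.LeaseOptions) string {
	if !l.LeaseEnabled() {
		return "no lease"
	}
	return "expires around " + l.ExpirationTime().Format(time.RFC3339)
}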
135 vendor/github.com/hashicorp/vault/sdk/logical/logical.go generated vendored Normal file
@ -0,0 +1,135 @@
package logical

import (
	"context"

	log "github.com/hashicorp/go-hclog"
)

// BackendType is the type of backend that is being implemented
type BackendType uint32

// These are the types of backends that can be derived from
// logical.Backend
const (
	TypeUnknown    BackendType = 0 // This is also the zero-value for BackendType
	TypeLogical    BackendType = 1
	TypeCredential BackendType = 2
)

// Stringer implementation
func (b BackendType) String() string {
	switch b {
	case TypeLogical:
		return "secret"
	case TypeCredential:
		return "auth"
	}

	return "unknown"
}

// Backend interface must be implemented to be "mountable" at
// a given path. Requests flow through a router which has various mount
// points that flow to a logical backend. The logic of each backend is flexible,
// and this is what allows materialized keys to function. There can be specialized
// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can
// interact with remote APIs to generate keys dynamically. This interface also
// allows for a "procfs" like interaction, as internal state can be exposed by
// acting like a logical backend and being mounted.
type Backend interface {
	// Initialize is used to initialize a plugin after it has been mounted.
	Initialize(context.Context, *InitializationRequest) error

	// HandleRequest is used to handle a request and generate a response.
	// The backends must check the operation type and handle appropriately.
	HandleRequest(context.Context, *Request) (*Response, error)

	// SpecialPaths is a list of paths that are special in some way.
	// See PathType for the types of special paths. The key is the type
	// of the special path, and the value is a list of paths for this type.
	// This is not a regular expression but is an exact match. If the path
	// ends in '*' then it is a prefix-based match. The '*' can only appear
	// at the end.
	SpecialPaths() *Paths

	// System provides an interface to access certain system configuration
	// information, such as globally configured default and max lease TTLs.
	System() SystemView

	// Logger provides an interface to access the underlying logger. This
	// is useful when a struct embeds a Backend-implemented struct that
	// contains a private instance of logger.
	Logger() log.Logger

	// HandleExistenceCheck is used to handle a request and generate a response
	// indicating whether the given path exists or not; this is used to
	// understand whether the request must have a Create or Update capability
	// ACL applied. The first bool indicates whether an existence check
	// function was found for the backend; the second indicates whether, if an
	// existence check function was found, the item exists or not.
	HandleExistenceCheck(context.Context, *Request) (bool, bool, error)

	// Cleanup is invoked during an unmount of a backend to allow it to
	// handle any cleanup like connection closing or releasing of file handles.
	Cleanup(context.Context)

	// InvalidateKey may be invoked when an object is modified that belongs
	// to the backend. The backend can use this to clear any caches or reset
	// internal state as needed.
	InvalidateKey(context.Context, string)

	// Setup is used to set up the backend based on the provided backend
	// configuration.
	Setup(context.Context, *BackendConfig) error

	// Type returns the BackendType for the particular backend
	Type() BackendType
}

// BackendConfig is provided to the factory to initialize the backend
type BackendConfig struct {
	// View should not be stored, and should only be used for initialization
	StorageView Storage

	// The backend should use this logger. The log should not contain any secrets.
	Logger log.Logger

	// System provides a view into a subset of safe system information that
	// is useful for backends, such as the default/max lease TTLs
	System SystemView

	// BackendUUID is a unique identifier provided to this backend. It's useful
	// when a backend needs a consistent and unique string without using storage.
	BackendUUID string

	// Config is the opaque user configuration provided when mounting
	Config map[string]string
}

// Factory is the factory function to create a logical backend.
type Factory func(context.Context, *BackendConfig) (Backend, error)

// Paths is the structure of special paths that is used for SpecialPaths.
type Paths struct {
	// Root are the paths that require a root token to access
	Root []string

	// Unauthenticated are the paths that can be accessed without any auth.
	Unauthenticated []string

	// LocalStorage are paths (prefixes) that are local to this instance; this
	// indicates that these paths should not be replicated
	LocalStorage []string

	// SealWrapStorage are storage paths that, when using a capable seal,
	// should be seal wrapped with extra encryption. It is exact matching
	// unless it ends with '/' in which case it will be treated as a prefix.
	SealWrapStorage []string
}

type Auditor interface {
	AuditRequest(ctx context.Context, input *LogInput) error
	AuditResponse(ctx context.Context, input *LogInput) error
}
52 vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go generated vendored Normal file
@ -0,0 +1,52 @@
package logical

import (
	"context"

	"github.com/hashicorp/vault/sdk/physical"
)

type LogicalStorage struct {
	underlying physical.Backend
}

func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
	entry, err := s.underlying.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, nil
	}
	return &StorageEntry{
		Key:      entry.Key,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	}, nil
}

func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error {
	return s.underlying.Put(ctx, &physical.Entry{
		Key:      entry.Key,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	})
}

func (s *LogicalStorage) Delete(ctx context.Context, key string) error {
	return s.underlying.Delete(ctx, key)
}

func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) {
	return s.underlying.List(ctx, prefix)
}

func (s *LogicalStorage) Underlying() physical.Backend {
	return s.underlying
}

func NewLogicalStorage(underlying physical.Backend) *LogicalStorage {
	return &LogicalStorage{
		underlying: underlying,
	}
}
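LogicalStorage simply adapts a physical.Backend to the logical storage API, converting between StorageEntry and physical.Entry on the way through. Below is a hedged round-trip sketch, not part of the vendored code; the physical backend is assumed to be supplied by the caller (for example an in-memory backend in tests), and the key/value are placeholders.

package example // illustrative scratch package, not part of the diff

import (
	"context"

	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
)

// roundTrip writes one entry through LogicalStorage and reads it back.
func roundTrip(ctx context.Context, b physical.Backend) (*logical.StorageEntry, error) {
	s := logical.NewLogicalStorage(b)

	if err := s.Put(ctx, &logical.StorageEntry{Key: "foo", Value: []byte("bar")}); err != nil {
		return nil, err
	}
	return s.Get(ctx, "foo")
}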
151 vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go generated vendored Normal file
@ -0,0 +1,151 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.15.8
|
||||
// source: sdk/logical/plugin.proto
|
||||
|
||||
package logical
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type PluginEnvironment struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// VaultVersion is the version of the Vault server
|
||||
VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PluginEnvironment) Reset() {
|
||||
*x = PluginEnvironment{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sdk_logical_plugin_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PluginEnvironment) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PluginEnvironment) ProtoMessage() {}
|
||||
|
||||
func (x *PluginEnvironment) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sdk_logical_plugin_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PluginEnvironment.ProtoReflect.Descriptor instead.
|
||||
func (*PluginEnvironment) Descriptor() ([]byte, []int) {
|
||||
return file_sdk_logical_plugin_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PluginEnvironment) GetVaultVersion() string {
|
||||
if x != nil {
|
||||
return x.VaultVersion
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_sdk_logical_plugin_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_sdk_logical_plugin_proto_rawDesc = []byte{
|
||||
0x0a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c,
|
||||
0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69,
|
||||
0x63, 0x61, 0x6c, 0x22, 0x38, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76,
|
||||
0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x75, 0x6c,
|
||||
0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x28, 0x5a,
|
||||
0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f,
|
||||
0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_sdk_logical_plugin_proto_rawDescOnce sync.Once
|
||||
file_sdk_logical_plugin_proto_rawDescData = file_sdk_logical_plugin_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_sdk_logical_plugin_proto_rawDescGZIP() []byte {
|
||||
file_sdk_logical_plugin_proto_rawDescOnce.Do(func() {
|
||||
file_sdk_logical_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_plugin_proto_rawDescData)
|
||||
})
|
||||
return file_sdk_logical_plugin_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_sdk_logical_plugin_proto_goTypes = []interface{}{
|
||||
(*PluginEnvironment)(nil), // 0: logical.PluginEnvironment
|
||||
}
|
||||
var file_sdk_logical_plugin_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_sdk_logical_plugin_proto_init() }
|
||||
func file_sdk_logical_plugin_proto_init() {
|
||||
if File_sdk_logical_plugin_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PluginEnvironment); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_sdk_logical_plugin_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_sdk_logical_plugin_proto_goTypes,
|
||||
DependencyIndexes: file_sdk_logical_plugin_proto_depIdxs,
|
||||
MessageInfos: file_sdk_logical_plugin_proto_msgTypes,
|
||||
}.Build()
|
||||
File_sdk_logical_plugin_proto = out.File
|
||||
file_sdk_logical_plugin_proto_rawDesc = nil
|
||||
file_sdk_logical_plugin_proto_goTypes = nil
|
||||
file_sdk_logical_plugin_proto_depIdxs = nil
|
||||
}
|
10 vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto generated vendored Normal file
@ -0,0 +1,10 @@
syntax = "proto3";

option go_package = "github.com/hashicorp/vault/sdk/logical";

package logical;

message PluginEnvironment {
	// VaultVersion is the version of the Vault server
	string vault_version = 1;
}
372 vendor/github.com/hashicorp/vault/sdk/logical/request.go generated vendored Normal file
@ -0,0 +1,372 @@
package logical

import (
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/mitchellh/copystructure"
)

// RequestWrapInfo is a struct that stores information about desired response
// and seal wrapping behavior
type RequestWrapInfo struct {
	// Setting to non-zero specifies that the response should be wrapped.
	// Specifies the desired TTL of the wrapping token.
	TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""`

	// The format to use for the wrapped response; if not specified it's a bare
	// token
	Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""`

	// A flag to conforming backends that data for a given request should be
	// seal wrapped
	SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""`
}

func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) {
	if r == nil {
		return nil, nil
	}
	switch key {
	case "ttl":
		return r.TTL, nil
	case "ttl_seconds":
		return int64(r.TTL.Seconds()), nil
	}

	return nil, nil
}

func (r *RequestWrapInfo) SentinelKeys() []string {
	return []string{
		"ttl",
		"ttl_seconds",
	}
}

type ClientTokenSource uint32

const (
	NoClientToken ClientTokenSource = iota
	ClientTokenFromVaultHeader
	ClientTokenFromAuthzHeader
)

type WALState struct {
	ClusterID       string
	LocalIndex      uint64
	ReplicatedIndex uint64
}

const indexStateCtxKey = "index_state"

// IndexStateContext returns a context with an added value holding the index
// state that should be populated on writes.
func IndexStateContext(ctx context.Context, state *WALState) context.Context {
	return context.WithValue(ctx, indexStateCtxKey, state)
}

// IndexStateFromContext is a helper to look up if the provided context contains
// an index state pointer.
func IndexStateFromContext(ctx context.Context) *WALState {
	s, ok := ctx.Value(indexStateCtxKey).(*WALState)
	if !ok {
		return nil
	}
	return s
}

// Request is a struct that stores the parameters and context of a request
// being made to Vault. It is used to abstract the details of the higher level
// request protocol from the handlers.
//
// Note: Many of these have Sentinel disabled because they are values populated
// by the router after policy checks; the token namespace would be the right
// place to access them via Sentinel
type Request struct {
	// ID is the uuid associated with each request
	ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""`

	// If set, the name given to the replication secondary where this request
	// originated
	ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""`

	// Operation is the requested operation type
	Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"`

	// Path is the full path of the request
	Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""`

	// Request data is an opaque map that must have string keys.
	Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"`

	// Storage can be used to durably store and retrieve state.
	Storage Storage `json:"-" sentinel:""`

	// Secret will be non-nil only for Revoke and Renew operations
	// to represent the secret that was returned prior.
	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""`

	// Auth will be non-nil only for Renew operations
	// to represent the auth that was returned prior.
	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""`

	// Headers will contain the http headers from the request. This value will
	// be used in the audit broker to ensure we are auditing only the allowed
	// headers.
	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""`

	// Connection will be non-nil only for credential providers to
	// inspect the connection information and potentially use it for
	// authentication/protection.
	Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`

	// ClientToken is provided to the core so that the identity
	// can be verified and ACLs applied. This value is passed
	// through to the logical backends but after being salted and
	// hashed.
	ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""`

	// ClientTokenAccessor is provided to the core so that it can get
	// logged as part of request audit logging.
	ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""`

	// DisplayName is provided to the logical backend to help associate
	// dynamic secrets with the source entity. This is not a sensitive
	// name, but is useful for operators.
	DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""`

	// MountPoint is provided so that a logical backend can generate
	// paths relative to itself. The `Path` is effectively the client
	// request path with the MountPoint trimmed off.
	MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""`

	// MountType is provided so that a logical backend can make decisions
	// based on the specific mount type (e.g., if a mount type has different
	// aliases, generating different defaults depending on the alias)
	MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""`

	// MountAccessor is provided so that identities returned by the authentication
	// backends can be tied to the mount it belongs to.
	MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""`

	// WrapInfo contains requested response wrapping parameters
	WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""`

	// ClientTokenRemainingUses represents the allowed number of uses left on the
	// token supplied
	ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`

	// EntityID is the identity of the caller extracted out of the token used
	// to make this request
	EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""`

	// PolicyOverride indicates that the requestor wishes to override
	// soft-mandatory Sentinel policies
	PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"`

	// Whether the request is unauthenticated, as in, had no client token
	// attached. Useful in some situations where the client token is not made
	// accessible.
	Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"`

	// MFACreds holds the parsed MFA information supplied over the API as part of
	// X-Vault-MFA header
	MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""`

	// Cached token entry. This avoids another lookup in request handling when
	// we've already looked it up at http handling time. Note that this token
	// has not been "used", as in it will not properly take into account use
	// count limitations. As a result this field should only ever be used for
|
||||
// transport to a function that would otherwise do a lookup and then
|
||||
// properly use the token.
|
||||
tokenEntry *TokenEntry
|
||||
|
||||
// For replication, contains the last WAL on the remote side after handling
|
||||
// the request, used for best-effort avoidance of stale read-after-write
|
||||
lastRemoteWAL uint64
|
||||
|
||||
// ControlGroup holds the authorizations that have happened on this
|
||||
// request
|
||||
ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""`
|
||||
|
||||
// ClientTokenSource tells us where the client token was sourced from, so
|
||||
// we can delete it before sending off to plugins
|
||||
ClientTokenSource ClientTokenSource
|
||||
|
||||
// HTTPRequest, if set, can be used to access fields from the HTTP request
|
||||
// that generated this logical.Request object, such as the request body.
|
||||
HTTPRequest *http.Request `json:"-" sentinel:""`
|
||||
|
||||
// ResponseWriter if set can be used to stream a response value to the http
|
||||
// request that generated this logical.Request object.
|
||||
ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""`
|
||||
|
||||
// requiredState is used internally to propagate the X-Vault-Index request
|
||||
// header to later levels of request processing that operate only on
|
||||
// logical.Request.
|
||||
requiredState []string
|
||||
|
||||
// responseState is used internally to propagate the state that should appear
|
||||
// in response headers; it's attached to the request rather than the response
|
||||
// because not all requests yields non-nil responses.
|
||||
responseState *WALState
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of the request by using copystructure
|
||||
func (r *Request) Clone() (*Request, error) {
|
||||
cpy, err := copystructure.Copy(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cpy.(*Request), nil
|
||||
}
|
||||
|
||||
// Get returns a data field and guards for nil Data
|
||||
func (r *Request) Get(key string) interface{} {
|
||||
if r.Data == nil {
|
||||
return nil
|
||||
}
|
||||
return r.Data[key]
|
||||
}
|
||||
|
||||
// GetString returns a data field as a string
|
||||
func (r *Request) GetString(key string) string {
|
||||
raw := r.Get(key)
|
||||
s, _ := raw.(string)
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *Request) GoString() string {
|
||||
return fmt.Sprintf("*%#v", *r)
|
||||
}
|
||||
|
||||
func (r *Request) SentinelGet(key string) (interface{}, error) {
|
||||
switch key {
|
||||
case "path":
|
||||
// Sanitize it here so that it's consistent in policies
|
||||
return strings.TrimPrefix(r.Path, "/"), nil
|
||||
|
||||
case "wrapping", "wrap_info":
|
||||
// If the pointer is nil accessing the wrap info is considered
|
||||
// "undefined" so this allows us to instead discover a TTL of zero
|
||||
if r.WrapInfo == nil {
|
||||
return &RequestWrapInfo{}, nil
|
||||
}
|
||||
return r.WrapInfo, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *Request) SentinelKeys() []string {
|
||||
return []string{
|
||||
"path",
|
||||
"wrapping",
|
||||
"wrap_info",
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Request) LastRemoteWAL() uint64 {
|
||||
return r.lastRemoteWAL
|
||||
}
|
||||
|
||||
func (r *Request) SetLastRemoteWAL(last uint64) {
|
||||
r.lastRemoteWAL = last
|
||||
}
|
||||
|
||||
func (r *Request) RequiredState() []string {
|
||||
return r.requiredState
|
||||
}
|
||||
|
||||
func (r *Request) SetRequiredState(state []string) {
|
||||
r.requiredState = state
|
||||
}
|
||||
|
||||
func (r *Request) ResponseState() *WALState {
|
||||
return r.responseState
|
||||
}
|
||||
|
||||
func (r *Request) SetResponseState(w *WALState) {
|
||||
r.responseState = w
|
||||
}
|
||||
|
||||
func (r *Request) TokenEntry() *TokenEntry {
|
||||
return r.tokenEntry
|
||||
}
|
||||
|
||||
func (r *Request) SetTokenEntry(te *TokenEntry) {
|
||||
r.tokenEntry = te
|
||||
}
|
||||
|
||||
// RenewRequest creates the structure of the renew request.
|
||||
func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request {
|
||||
return &Request{
|
||||
Operation: RenewOperation,
|
||||
Path: path,
|
||||
Data: data,
|
||||
Secret: secret,
|
||||
}
|
||||
}
|
||||
|
||||
// RenewAuthRequest creates the structure of the renew request for an auth.
|
||||
func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request {
|
||||
return &Request{
|
||||
Operation: RenewOperation,
|
||||
Path: path,
|
||||
Data: data,
|
||||
Auth: auth,
|
||||
}
|
||||
}
|
||||
|
||||
// RevokeRequest creates the structure of the revoke request.
|
||||
func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request {
|
||||
return &Request{
|
||||
Operation: RevokeOperation,
|
||||
Path: path,
|
||||
Data: data,
|
||||
Secret: secret,
|
||||
}
|
||||
}
|
||||
|
||||
// RollbackRequest creates the structure of the revoke request.
|
||||
func RollbackRequest(path string) *Request {
|
||||
return &Request{
|
||||
Operation: RollbackOperation,
|
||||
Path: path,
|
||||
Data: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Operation is an enum that is used to specify the type
|
||||
// of request being made
|
||||
type Operation string
|
||||
|
||||
const (
|
||||
// The operations below are called per path
|
||||
CreateOperation Operation = "create"
|
||||
ReadOperation = "read"
|
||||
UpdateOperation = "update"
|
||||
DeleteOperation = "delete"
|
||||
ListOperation = "list"
|
||||
HelpOperation = "help"
|
||||
AliasLookaheadOperation = "alias-lookahead"
|
||||
|
||||
// The operations below are called globally, the path is less relevant.
|
||||
RevokeOperation Operation = "revoke"
|
||||
RenewOperation = "renew"
|
||||
RollbackOperation = "rollback"
|
||||
)
|
||||
|
||||
type MFACreds map[string][]string
|
||||
|
||||
// InitializationRequest stores the parameters and context of an Initialize()
|
||||
// call being made to a logical.Backend.
|
||||
type InitializationRequest struct {
|
||||
|
||||
// Storage can be used to durably store and retrieve state.
|
||||
Storage Storage
|
||||
}
|
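To make the request surface above concrete, here is a minimal, hypothetical sketch of how a backend handler typically reads fields off a *logical.Request; the path and data keys are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// Hypothetical request, shaped the way a backend handler would receive it.
	req := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/example",
		Data:      map[string]interface{}{"ttl": "1h"},
	}

	// Get guards against nil Data; GetString silently drops non-string values.
	fmt.Println(req.Get("ttl"))        // 1h
	fmt.Println(req.GetString("nope")) // empty string

	// RenewRequest and friends build the request shapes Vault core uses for
	// lease maintenance.
	renew := logical.RenewRequest("roles/example", &logical.Secret{}, nil)
	fmt.Println(renew.Operation) // renew
}
```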
213
vendor/github.com/hashicorp/vault/sdk/logical/response.go
generated
vendored
Normal file
@ -0,0 +1,213 @@
package logical

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"sync/atomic"

	"github.com/hashicorp/vault/sdk/helper/wrapping"
)

const (
	// HTTPContentType can be specified in the Data field of a Response
	// so that the HTTP front end can specify a custom Content-Type associated
	// with the HTTPRawBody. This can only be used for non-secrets, and should
	// be avoided unless absolutely necessary, such as implementing a specification.
	// The value must be a string.
	HTTPContentType = "http_content_type"

	// HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
	// This can only be specified for non-secrets, and should be similarly
	// avoided like the HTTPContentType. The value must be a byte slice.
	HTTPRawBody = "http_raw_body"

	// HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
	// This can only be specified for non-secrets, and should be similarly
	// avoided like the HTTPContentType. The value must be an integer.
	HTTPStatusCode = "http_status_code"

	// For unwrapping we may need to know whether the value contained in the
	// raw body is already JSON-unmarshaled. The presence of this key indicates
	// that it has already been unmarshaled. That way we don't need to simply
	// ignore errors.
	HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded"

	// If set, HTTPRawCacheControl will replace the default Cache-Control=no-store header
	// set by the generic wrapping handler. The value must be a string.
	HTTPRawCacheControl = "http_raw_cache_control"
)

// Response is a struct that stores the response of a request.
// It is used to abstract the details of the higher level request protocol.
type Response struct {
	// Secret, if not nil, denotes that this response represents a secret.
	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`

	// Auth, if not nil, contains the authentication information for
	// this response. This is only checked and means something for
	// credential backends.
	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`

	// Response data is an opaque map that must have string keys. For
	// secrets, this data is sent down to the user as-is. To store internal
	// data that you don't want the user to see, store it in
	// Secret.InternalData.
	Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`

	// Redirect is an HTTP URL to redirect to for further authentication.
	// This is only valid for credential backends. This will be blanked
	// for any logical backend and ignored.
	Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`

	// Warnings allow operations or backends to return warnings in response
	// to user actions without failing the action outright.
	Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`

	// Information for wrapping the response in a cubbyhole
	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`

	// Headers will contain the http headers from the plugin that it wishes to
	// have as part of the output
	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
}

// AddWarning adds a warning into the response's warning list
func (r *Response) AddWarning(warning string) {
	if r.Warnings == nil {
		r.Warnings = make([]string, 0, 1)
	}
	r.Warnings = append(r.Warnings, warning)
}

// IsError returns true if this response seems to indicate an error.
func (r *Response) IsError() bool {
	return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil
}

func (r *Response) Error() error {
	if !r.IsError() {
		return nil
	}
	switch r.Data["error"].(type) {
	case string:
		return errors.New(r.Data["error"].(string))
	case error:
		return r.Data["error"].(error)
	}
	return nil
}

// HelpResponse is used to format a help response
func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response {
	return &Response{
		Data: map[string]interface{}{
			"help":     text,
			"see_also": seeAlso,
			"openapi":  oapiDoc,
		},
	}
}

// ErrorResponse is used to format an error response
func ErrorResponse(text string, vargs ...interface{}) *Response {
	if len(vargs) > 0 {
		text = fmt.Sprintf(text, vargs...)
	}
	return &Response{
		Data: map[string]interface{}{
			"error": text,
		},
	}
}

// ListResponse is used to format a response to a list operation.
func ListResponse(keys []string) *Response {
	resp := &Response{
		Data: map[string]interface{}{},
	}
	if len(keys) != 0 {
		resp.Data["keys"] = keys
	}
	return resp
}

// ListResponseWithInfo is used to format a response to a list operation and
// return the keys as well as a map with corresponding key info.
func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response {
	resp := ListResponse(keys)

	keyInfoData := make(map[string]interface{})
	for _, key := range keys {
		val, ok := keyInfo[key]
		if ok {
			keyInfoData[key] = val
		}
	}

	if len(keyInfoData) > 0 {
		resp.Data["key_info"] = keyInfoData
	}

	return resp
}

// RespondWithStatusCode takes a response and converts it to a raw response with
// the provided Status Code.
func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) {
	ret := &Response{
		Data: map[string]interface{}{
			HTTPContentType: "application/json",
			HTTPStatusCode:  code,
		},
	}

	if resp != nil {
		httpResp := LogicalResponseToHTTPResponse(resp)

		if req != nil {
			httpResp.RequestID = req.ID
		}

		body, err := json.Marshal(httpResp)
		if err != nil {
			return nil, err
		}

		// We default to string here so that the value is HMAC'd via audit.
		// Since this function is always marshaling to JSON, this is
		// appropriate.
		ret.Data[HTTPRawBody] = string(body)
	}

	return ret, nil
}

// HTTPResponseWriter is optionally added to a request object and can be used to
// write directly to the HTTP response writer.
type HTTPResponseWriter struct {
	http.ResponseWriter
	written *uint32
}

// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the
// provided io.Writer.
func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter {
	return &HTTPResponseWriter{
		ResponseWriter: w,
		written:        new(uint32),
	}
}

// Write will write the bytes to the underlying io.Writer.
func (rw *HTTPResponseWriter) Write(bytes []byte) (int, error) {
	atomic.StoreUint32(rw.written, 1)

	return rw.ResponseWriter.Write(bytes)
}

// Written tells us if the writer has been written to yet.
func (rw *HTTPResponseWriter) Written() bool {
	return atomic.LoadUint32(rw.written) == 1
}
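A short usage sketch of the response helpers defined above (ErrorResponse, ListResponse, AddWarning); the key names and messages are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// An error response carries exactly one "error" key in Data, which is
	// what IsError and Error key off of.
	errResp := logical.ErrorResponse("unknown role %q", "example")
	fmt.Println(errResp.IsError(), errResp.Error())

	// A list response only sets "keys" when there is something to list.
	listResp := logical.ListResponse([]string{"roles/a", "roles/b"})
	listResp.AddWarning("listing is eventually consistent")
	fmt.Println(listResp.Data["keys"], listResp.Warnings)
}
```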
174
vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
package logical

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"

	"github.com/hashicorp/errwrap"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

// RespondErrorCommon pulls most of the functionality from http's
// respondErrorCommon and some of http's handleLogical and makes it available
// to both the http package and elsewhere.
func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
	if err == nil && (resp == nil || !resp.IsError()) {
		switch {
		case req.Operation == ReadOperation:
			if resp == nil {
				return http.StatusNotFound, nil
			}

		// Basically: if we have empty "keys" or no keys at all, 404. This
		// provides consistency with GET.
		case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil):
			if resp == nil {
				return http.StatusNotFound, nil
			}
			if len(resp.Data) == 0 {
				if len(resp.Warnings) > 0 {
					return 0, nil
				}
				return http.StatusNotFound, nil
			}
			keysRaw, ok := resp.Data["keys"]
			if !ok || keysRaw == nil {
				// If we don't have keys but have other data, return as-is
				if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
					return 0, nil
				}
				return http.StatusNotFound, nil
			}

			var keys []string
			switch keysRaw.(type) {
			case []interface{}:
				keys = make([]string, len(keysRaw.([]interface{})))
				for i, el := range keysRaw.([]interface{}) {
					s, ok := el.(string)
					if !ok {
						return http.StatusInternalServerError, nil
					}
					keys[i] = s
				}

			case []string:
				keys = keysRaw.([]string)
			default:
				return http.StatusInternalServerError, nil
			}

			if len(keys) == 0 {
				return http.StatusNotFound, nil
			}
		}

		return 0, nil
	}

	if errwrap.ContainsType(err, new(ReplicationCodedError)) {
		var allErrors error
		var codedErr *ReplicationCodedError
		errwrap.Walk(err, func(inErr error) {
			newErr, ok := inErr.(*ReplicationCodedError)
			if ok {
				codedErr = newErr
			} else {
				allErrors = multierror.Append(allErrors, inErr)
			}
		})
		if allErrors != nil {
			return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
		}
		return codedErr.Code, errors.New(codedErr.Msg)
	}

	// Start out with internal server error since in most of these cases there
	// won't be a response so this won't be overridden
	statusCode := http.StatusInternalServerError
	// If we actually have a response, start out with bad request
	if resp != nil {
		statusCode = http.StatusBadRequest
	}

	// Now, check the error itself; if it has a specific logical error, set the
	// appropriate code
	if err != nil {
		switch {
		case errwrap.ContainsType(err, new(StatusBadRequest)):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrPermissionDenied.Error()):
			statusCode = http.StatusForbidden
		case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
			statusCode = http.StatusMethodNotAllowed
		case errwrap.Contains(err, ErrUnsupportedPath.Error()):
			statusCode = http.StatusNotFound
		case errwrap.Contains(err, ErrInvalidRequest.Error()):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrUpstreamRateLimited.Error()):
			statusCode = http.StatusBadGateway
		case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()):
			statusCode = http.StatusTooManyRequests
		case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()):
			statusCode = http.StatusTooManyRequests
		case errwrap.Contains(err, ErrMissingRequiredState.Error()):
			statusCode = http.StatusPreconditionFailed
		}
	}

	if resp != nil && resp.IsError() {
		err = fmt.Errorf("%s", resp.Data["error"].(string))
	}

	return statusCode, err
}

// AdjustErrorStatusCode adjusts the status that will be sent in error
// conditions in a way that can be shared across http's respondError and other
// locations.
func AdjustErrorStatusCode(status *int, err error) {
	// Handle nested errors
	if t, ok := err.(*multierror.Error); ok {
		for _, e := range t.Errors {
			AdjustErrorStatusCode(status, e)
		}
	}

	// Adjust status code when sealed
	if errwrap.Contains(err, consts.ErrSealed.Error()) {
		*status = http.StatusServiceUnavailable
	}

	// Adjust status code when the request body is too large
	if errwrap.Contains(err, "http: request body too large") {
		*status = http.StatusRequestEntityTooLarge
	}

	// Allow HTTPCoded error passthrough to specify a code
	if t, ok := err.(HTTPCodedError); ok {
		*status = t.Code()
	}
}

func RespondError(w http.ResponseWriter, status int, err error) {
	AdjustErrorStatusCode(&status, err)

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)

	type ErrorResponse struct {
		Errors []string `json:"errors"`
	}
	resp := &ErrorResponse{Errors: make([]string, 0, 1)}
	if err != nil {
		resp.Errors = append(resp.Errors, err.Error())
	}

	enc := json.NewEncoder(w)
	enc.Encode(resp)
}
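As a rough illustration of how these helpers map logical results onto HTTP, the sketch below (using net/http/httptest, with a made-up error message) shows the 404-on-missing-read rule and the JSON error envelope written by RespondError.

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// A read that produced neither an error nor a response maps to 404.
	req := &logical.Request{Operation: logical.ReadOperation}
	status, err := logical.RespondErrorCommon(req, nil, nil)
	fmt.Println(status, err) // 404 <nil>

	// RespondError adjusts the status if needed and writes {"errors": [...]}.
	rec := httptest.NewRecorder()
	logical.RespondError(rec, status, fmt.Errorf("role not found"))
	fmt.Println(rec.Code, rec.Body.String())
}
```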
30
vendor/github.com/hashicorp/vault/sdk/logical/secret.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package logical

import "fmt"

// Secret represents the secret part of a response.
type Secret struct {
	LeaseOptions

	// InternalData is JSON-encodable data that is stored with the secret.
	// This will be sent back during a Renew/Revoke for storing internal data
	// used for those operations.
	InternalData map[string]interface{} `json:"internal_data" sentinel:""`

	// LeaseID is the ID returned to the user to manage this secret.
	// This is generated by Vault core. Any set value will be ignored.
	// For requests, this will always be blank.
	LeaseID string `sentinel:""`
}

func (s *Secret) Validate() error {
	if s.TTL < 0 {
		return fmt.Errorf("ttl duration must not be less than zero")
	}

	return nil
}

func (s *Secret) GoString() string {
	return fmt.Sprintf("*%#v", *s)
}
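A small sketch of constructing and validating a Secret. It assumes the TTL and Renewable fields come from the embedded LeaseOptions, which is defined elsewhere in this package and is used that way by the translation code later in this diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	s := &logical.Secret{
		InternalData: map[string]interface{}{"secret_type": "example"},
	}
	// TTL and Renewable are assumed to be provided by the embedded LeaseOptions.
	s.TTL = time.Hour
	s.Renewable = true
	fmt.Println(s.Validate()) // <nil>

	// A negative TTL is rejected.
	s.TTL = -time.Second
	fmt.Println(s.Validate())
}
```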
158
vendor/github.com/hashicorp/vault/sdk/logical/storage.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
package logical

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/helper/jsonutil"
)

// ErrReadOnly is returned when a backend does not support
// writing. This can be caused by a read-only replica or secondary
// cluster operation.
var ErrReadOnly = errors.New("cannot write to readonly storage")

// ErrSetupReadOnly is returned when a write operation is attempted on a
// storage while the backend is still being setup.
var ErrSetupReadOnly = errors.New("cannot write to storage during setup")

// Storage is the way that logical backends are able to read/write data.
type Storage interface {
	List(context.Context, string) ([]string, error)
	Get(context.Context, string) (*StorageEntry, error)
	Put(context.Context, *StorageEntry) error
	Delete(context.Context, string) error
}

// StorageEntry is the entry for an item in a Storage implementation.
type StorageEntry struct {
	Key      string
	Value    []byte
	SealWrap bool
}

// DecodeJSON decodes the 'Value' present in StorageEntry.
func (e *StorageEntry) DecodeJSON(out interface{}) error {
	return jsonutil.DecodeJSON(e.Value, out)
}

// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
	encodedBytes, err := jsonutil.EncodeJSON(v)
	if err != nil {
		return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
	}

	return &StorageEntry{
		Key:   k,
		Value: encodedBytes,
	}, nil
}

type ClearableView interface {
	List(context.Context, string) ([]string, error)
	Delete(context.Context, string) error
}

// ScanView is used to scan all the keys in a view iteratively
func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
	frontier := []string{""}
	for len(frontier) > 0 {
		n := len(frontier)
		current := frontier[n-1]
		frontier = frontier[:n-1]

		// List the contents
		contents, err := view.List(ctx, current)
		if err != nil {
			return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
		}

		// Handle the contents in the directory
		for _, c := range contents {
			// Exit if the context has been canceled
			if ctx.Err() != nil {
				return ctx.Err()
			}
			fullPath := current + c
			if strings.HasSuffix(c, "/") {
				frontier = append(frontier, fullPath)
			} else {
				cb(fullPath)
			}
		}
	}
	return nil
}

// CollectKeys is used to collect all the keys in a view
func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
	return CollectKeysWithPrefix(ctx, view, "")
}

// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
	var keys []string

	cb := func(path string) {
		if strings.HasPrefix(path, prefix) {
			keys = append(keys, path)
		}
	}

	// Scan for all the keys
	if err := ScanView(ctx, view, cb); err != nil {
		return nil, err
	}
	return keys, nil
}

// ClearView is used to delete all the keys in a view
func ClearView(ctx context.Context, view ClearableView) error {
	return ClearViewWithLogging(ctx, view, nil)
}

func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error {
	if view == nil {
		return nil
	}

	if logger == nil {
		logger = hclog.NewNullLogger()
	}

	// Collect all the keys
	keys, err := CollectKeys(ctx, view)
	if err != nil {
		return err
	}

	logger.Debug("clearing view", "total_keys", len(keys))

	// Delete all the keys
	var pctDone int
	for idx, key := range keys {
		// Rather than keep trying to do stuff with a canceled context, bail;
		// storage will fail anyways
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if err := view.Delete(ctx, key); err != nil {
			return err
		}

		newPctDone := idx * 100.0 / len(keys)
		if int(newPctDone) > pctDone {
			pctDone = int(newPctDone)
			logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx)
		}
	}

	logger.Debug("view cleared")

	return nil
}
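The helpers above combine naturally with InmemStorage (defined in the next file); a minimal sketch with made-up keys:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := context.Background()
	s := &logical.InmemStorage{}

	// Store a JSON-encoded entry and decode it back.
	entry, err := logical.StorageEntryJSON("config/app", map[string]string{"mode": "demo"})
	if err != nil {
		panic(err)
	}
	if err := s.Put(ctx, entry); err != nil {
		panic(err)
	}

	out := map[string]string{}
	got, err := s.Get(ctx, "config/app")
	if err != nil || got == nil {
		panic("unexpected get result")
	}
	if err := got.DecodeJSON(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["mode"]) // demo

	// CollectKeys walks the view recursively via ScanView.
	keys, _ := logical.CollectKeys(ctx, s)
	fmt.Println(keys) // [config/app]
}
```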
87
vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
package logical

import (
	"context"
	"sync"

	"github.com/hashicorp/vault/sdk/physical"
	"github.com/hashicorp/vault/sdk/physical/inmem"
)

// InmemStorage implements Storage and stores all data in memory. It is
// basically a straight copy of physical.Inmem, but it prevents backends from
// having to load all of physical's dependencies (which are legion) just to
// have some testing storage.
type InmemStorage struct {
	underlying physical.Backend
	once       sync.Once
}

func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
	s.once.Do(s.init)

	entry, err := s.underlying.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, nil
	}
	return &StorageEntry{
		Key:      entry.Key,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	}, nil
}

func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error {
	s.once.Do(s.init)

	return s.underlying.Put(ctx, &physical.Entry{
		Key:      entry.Key,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	})
}

func (s *InmemStorage) Delete(ctx context.Context, key string) error {
	s.once.Do(s.init)

	return s.underlying.Delete(ctx, key)
}

func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) {
	s.once.Do(s.init)

	return s.underlying.List(ctx, prefix)
}

func (s *InmemStorage) Underlying() *inmem.InmemBackend {
	s.once.Do(s.init)

	return s.underlying.(*inmem.InmemBackend)
}

func (s *InmemStorage) FailPut(fail bool) *InmemStorage {
	s.Underlying().FailPut(fail)
	return s
}

func (s *InmemStorage) FailGet(fail bool) *InmemStorage {
	s.Underlying().FailGet(fail)
	return s
}

func (s *InmemStorage) FailDelete(fail bool) *InmemStorage {
	s.Underlying().FailDelete(fail)
	return s
}

func (s *InmemStorage) FailList(fail bool) *InmemStorage {
	s.Underlying().FailList(fail)
	return s
}

func (s *InmemStorage) init() {
	s.underlying, _ = inmem.NewInmem(nil, nil)
}
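The Fail* toggles are mainly useful for exercising error paths in backend tests. A sketch, assuming the underlying inmem backend returns an error for the disabled operation while the toggle is on:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := context.Background()

	// While FailPut is enabled, puts are expected to fail (the exact error
	// comes from the underlying sdk/physical/inmem backend).
	s := new(logical.InmemStorage).FailPut(true)
	err := s.Put(ctx, &logical.StorageEntry{Key: "k", Value: []byte("v")})
	fmt.Println(err != nil)

	s.FailPut(false)
	fmt.Println(s.Put(ctx, &logical.StorageEntry{Key: "k", Value: []byte("v")}))
}
```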
110
vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
package logical

import (
	"context"
	"errors"
	"strings"
)

type StorageView struct {
	storage Storage
	prefix  string
}

var ErrRelativePath = errors.New("relative paths not supported")

func NewStorageView(storage Storage, prefix string) *StorageView {
	return &StorageView{
		storage: storage,
		prefix:  prefix,
	}
}

// logical.Storage impl.
func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) {
	if err := s.SanityCheck(prefix); err != nil {
		return nil, err
	}
	return s.storage.List(ctx, s.ExpandKey(prefix))
}

// logical.Storage impl.
func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) {
	if err := s.SanityCheck(key); err != nil {
		return nil, err
	}
	entry, err := s.storage.Get(ctx, s.ExpandKey(key))
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, nil
	}
	entry.Key = s.TruncateKey(entry.Key)

	return &StorageEntry{
		Key:      entry.Key,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	}, nil
}

// logical.Storage impl.
func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error {
	if entry == nil {
		return errors.New("cannot write nil entry")
	}

	if err := s.SanityCheck(entry.Key); err != nil {
		return err
	}

	expandedKey := s.ExpandKey(entry.Key)

	nested := &StorageEntry{
		Key:      expandedKey,
		Value:    entry.Value,
		SealWrap: entry.SealWrap,
	}

	return s.storage.Put(ctx, nested)
}

// logical.Storage impl.
func (s *StorageView) Delete(ctx context.Context, key string) error {
	if err := s.SanityCheck(key); err != nil {
		return err
	}

	expandedKey := s.ExpandKey(key)

	return s.storage.Delete(ctx, expandedKey)
}

func (s *StorageView) Prefix() string {
	return s.prefix
}

// SubView constructs a nested sub-view using the given prefix
func (s *StorageView) SubView(prefix string) *StorageView {
	sub := s.ExpandKey(prefix)
	return &StorageView{storage: s.storage, prefix: sub}
}

// SanityCheck is used to perform a sanity check on a key
func (s *StorageView) SanityCheck(key string) error {
	if strings.Contains(key, "..") {
		return ErrRelativePath
	}
	return nil
}

// ExpandKey is used to expand to the full key path with the prefix
func (s *StorageView) ExpandKey(suffix string) string {
	return s.prefix + suffix
}

// TruncateKey is used to remove the prefix of the key
func (s *StorageView) TruncateKey(full string) string {
	return strings.TrimPrefix(full, s.prefix)
}
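A sketch showing how a view transparently prefixes keys and how SubView nests prefixes; the "packer/" and "roles/" prefixes are arbitrary examples.

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := context.Background()
	base := &logical.InmemStorage{}

	view := logical.NewStorageView(base, "packer/")
	sub := view.SubView("roles/")
	fmt.Println(sub.Prefix()) // packer/roles/

	// Writes through the sub-view land under the expanded key in base storage.
	_ = sub.Put(ctx, &logical.StorageEntry{Key: "web", Value: []byte("{}")})
	entry, _ := base.Get(ctx, "packer/roles/web")
	fmt.Println(entry != nil) // true

	// Relative paths are rejected by the sanity check.
	fmt.Println(view.SanityCheck("../escape"))
}
```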
211
vendor/github.com/hashicorp/vault/sdk/logical/system_view.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
package logical

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/license"
	"github.com/hashicorp/vault/sdk/helper/pluginutil"
	"github.com/hashicorp/vault/sdk/helper/wrapping"
)

// SystemView exposes system configuration information in a safe way
// for logical backends to consume
type SystemView interface {
	// DefaultLeaseTTL returns the default lease TTL set in Vault configuration
	DefaultLeaseTTL() time.Duration

	// MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
	// authors should take care not to issue credentials that last longer than
	// this value, as Vault will revoke them
	MaxLeaseTTL() time.Duration

	// Returns true if the mount is tainted. A mount is tainted if it is in the
	// process of being unmounted. This should only be used in special
	// circumstances; a primary use-case is as a guard in revocation functions.
	// If revocation of a backend's leases fails it can keep the unmounting
	// process from being successful. If the reason for this failure is not
	// relevant when the mount is tainted (for instance, saving a CRL to disk
	// when the stored CRL will be removed during the unmounting process
	// anyways), we can ignore the errors to allow unmounting to complete.
	Tainted() bool

	// Returns true if caching is disabled. If true, no caches should be used,
	// despite known slowdowns.
	CachingDisabled() bool

	// When run from a system view attached to a request, indicates whether the
	// request is affecting a local mount or not
	LocalMount() bool

	// ReplicationState indicates the state of cluster replication
	ReplicationState() consts.ReplicationState

	// HasFeature returns true if the feature is currently enabled
	HasFeature(feature license.Features) bool

	// ResponseWrapData wraps the given data in a cubbyhole and returns the
	// token used to unwrap.
	ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)

	// LookupPlugin looks into the plugin catalog for a plugin with the given
	// name. Returns a PluginRunner or an error if a plugin can not be found.
	LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error)

	// MlockEnabled returns the configuration setting for enabling mlock on
	// plugins.
	MlockEnabled() bool

	// EntityInfo returns a subset of information related to the identity entity
	// for the given entity id
	EntityInfo(entityID string) (*Entity, error)

	// GroupsForEntity returns the group membership information for the provided
	// entity id
	GroupsForEntity(entityID string) ([]*Group, error)

	// PluginEnv returns Vault environment information used by plugins
	PluginEnv(context.Context) (*PluginEnvironment, error)

	// GeneratePasswordFromPolicy generates a password from the policy referenced.
	// If the policy does not exist, this will return an error.
	GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error)
}

type PasswordPolicy interface {
	// Generate a random password
	Generate(context.Context, io.Reader) (string, error)
}

type ExtendedSystemView interface {
	Auditor() Auditor
	ForwardGenericRequest(context.Context, *Request) (*Response, error)
}

type PasswordGenerator func() (password string, err error)

type StaticSystemView struct {
	DefaultLeaseTTLVal  time.Duration
	MaxLeaseTTLVal      time.Duration
	SudoPrivilegeVal    bool
	TaintedVal          bool
	CachingDisabledVal  bool
	Primary             bool
	EnableMlock         bool
	LocalMountVal       bool
	ReplicationStateVal consts.ReplicationState
	EntityVal           *Entity
	GroupsVal           []*Group
	Features            license.Features
	VaultVersion        string
	PluginEnvironment   *PluginEnvironment
	PasswordPolicies    map[string]PasswordGenerator
}

type noopAuditor struct{}

func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error {
	return nil
}

func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error {
	return nil
}

func (d StaticSystemView) Auditor() Auditor {
	return noopAuditor{}
}

func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) {
	return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView")
}

func (d StaticSystemView) DefaultLeaseTTL() time.Duration {
	return d.DefaultLeaseTTLVal
}

func (d StaticSystemView) MaxLeaseTTL() time.Duration {
	return d.MaxLeaseTTLVal
}

func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool {
	return d.SudoPrivilegeVal
}

func (d StaticSystemView) Tainted() bool {
	return d.TaintedVal
}

func (d StaticSystemView) CachingDisabled() bool {
	return d.CachingDisabledVal
}

func (d StaticSystemView) LocalMount() bool {
	return d.LocalMountVal
}

func (d StaticSystemView) ReplicationState() consts.ReplicationState {
	return d.ReplicationStateVal
}

func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
	return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
}

func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) {
	return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
}

func (d StaticSystemView) MlockEnabled() bool {
	return d.EnableMlock
}

func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) {
	return d.EntityVal, nil
}

func (d StaticSystemView) GroupsForEntity(entityID string) ([]*Group, error) {
	return d.GroupsVal, nil
}

func (d StaticSystemView) HasFeature(feature license.Features) bool {
	return d.Features.HasFeature(feature)
}

func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) {
	return d.PluginEnvironment, nil
}

func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) {
	select {
	case <-ctx.Done():
		return "", fmt.Errorf("context timed out")
	default:
	}

	if d.PasswordPolicies == nil {
		return "", fmt.Errorf("password policy not found")
	}
	policy, exists := d.PasswordPolicies[policyName]
	if !exists {
		return "", fmt.Errorf("password policy not found")
	}
	return policy()
}

func (d *StaticSystemView) SetPasswordPolicy(name string, generator PasswordGenerator) {
	if d.PasswordPolicies == nil {
		d.PasswordPolicies = map[string]PasswordGenerator{}
	}
	d.PasswordPolicies[name] = generator
}

func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) {
	_, existed = d.PasswordPolicies[name]
	delete(d.PasswordPolicies, name)
	return existed
}
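StaticSystemView is the test double for SystemView. A sketch wiring up a made-up password policy and reading back the lease TTLs:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	sys := &logical.StaticSystemView{
		DefaultLeaseTTLVal: time.Hour,
		MaxLeaseTTLVal:     4 * time.Hour,
	}

	// Register a hypothetical policy backed by a trivial generator.
	sys.SetPasswordPolicy("demo-policy", func() (string, error) {
		return "not-a-real-password", nil
	})

	pw, err := sys.GeneratePasswordFromPolicy(context.Background(), "demo-policy")
	fmt.Println(pw, err)
	fmt.Println(sys.DefaultLeaseTTL(), sys.MaxLeaseTTL())
}
```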
87
vendor/github.com/hashicorp/vault/sdk/logical/testing.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
package logical

import (
	"context"
	"reflect"
	"time"

	testing "github.com/mitchellh/go-testing-interface"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/helper/logging"
)

// TestRequest is a helper to create a purely in-memory Request struct.
func TestRequest(t testing.T, op Operation, path string) *Request {
	return &Request{
		Operation:  op,
		Path:       path,
		Data:       make(map[string]interface{}),
		Storage:    new(InmemStorage),
		Connection: &Connection{},
	}
}

// TestStorage is a helper that can be used from unit tests to verify
// the behavior of a Storage impl.
func TestStorage(t testing.T, s Storage) {
	keys, err := s.List(context.Background(), "")
	if err != nil {
		t.Fatalf("list error: %s", err)
	}
	if len(keys) > 0 {
		t.Fatalf("should have no keys to start: %#v", keys)
	}

	entry := &StorageEntry{Key: "foo", Value: []byte("bar")}
	if err := s.Put(context.Background(), entry); err != nil {
		t.Fatalf("put error: %s", err)
	}

	actual, err := s.Get(context.Background(), "foo")
	if err != nil {
		t.Fatalf("get error: %s", err)
	}
	if !reflect.DeepEqual(actual, entry) {
		t.Fatalf("wrong value. Expected: %#v\nGot: %#v", entry, actual)
	}

	keys, err = s.List(context.Background(), "")
	if err != nil {
		t.Fatalf("list error: %s", err)
	}
	if !reflect.DeepEqual(keys, []string{"foo"}) {
		t.Fatalf("bad keys: %#v", keys)
	}

	if err := s.Delete(context.Background(), "foo"); err != nil {
		t.Fatalf("delete error: %s", err)
	}

	keys, err = s.List(context.Background(), "")
	if err != nil {
		t.Fatalf("list error: %s", err)
	}
	if len(keys) > 0 {
		t.Fatalf("should have no keys after delete: %#v", keys)
	}
}

func TestSystemView() *StaticSystemView {
	defaultLeaseTTLVal := time.Hour * 24
	maxLeaseTTLVal := time.Hour * 24 * 2
	return &StaticSystemView{
		DefaultLeaseTTLVal: defaultLeaseTTLVal,
		MaxLeaseTTLVal:     maxLeaseTTLVal,
	}
}

func TestBackendConfig() *BackendConfig {
	bc := &BackendConfig{
		Logger: logging.NewVaultLogger(log.Trace),
		System: TestSystemView(),
		Config: make(map[string]string),
	}

	return bc
}
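These helpers accept go-testing-interface's testing.T, which *testing.T satisfies, so they slot directly into standard Go tests. A sketch of a test file using them:

```go
package logical_test

import (
	"testing"

	"github.com/hashicorp/vault/sdk/logical"
)

func TestInmemStorageContract(t *testing.T) {
	// TestStorage exercises the basic List/Put/Get/Delete contract.
	logical.TestStorage(t, new(logical.InmemStorage))

	// TestRequest and TestBackendConfig give a ready-made request and backend
	// config for driving a backend under test.
	req := logical.TestRequest(t, logical.ReadOperation, "config")
	cfg := logical.TestBackendConfig()
	if req.Storage == nil || cfg.System == nil {
		t.Fatal("expected helpers to populate storage and system view")
	}
}
```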
225
vendor/github.com/hashicorp/vault/sdk/logical/token.go
generated
vendored
Normal file
@ -0,0 +1,225 @@
package logical

import (
	"fmt"
	"time"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

type TokenType uint8

const (
	// TokenTypeDefault means "use the default, if any, that is currently set
	// on the mount". If not set, results in a Service token.
	TokenTypeDefault TokenType = iota

	// TokenTypeService is a "normal" Vault token for long-lived services
	TokenTypeService

	// TokenTypeBatch is a batch token
	TokenTypeBatch

	// TokenTypeDefaultService, configured on a mount, means that if
	// TokenTypeDefault is sent back by the mount, create Service tokens
	TokenTypeDefaultService

	// TokenTypeDefaultBatch, configured on a mount, means that if
	// TokenTypeDefault is sent back by the mount, create Batch tokens
	TokenTypeDefaultBatch
)

func (t *TokenType) UnmarshalJSON(b []byte) error {
	if len(b) == 1 {
		*t = TokenType(b[0] - '0')
		return nil
	}

	// Handle upgrade from pre-1.2 where we were serialized as string:
	s := string(b)
	switch s {
	case `"default"`, `""`:
		*t = TokenTypeDefault
	case `"service"`:
		*t = TokenTypeService
	case `"batch"`:
		*t = TokenTypeBatch
	case `"default-service"`:
		*t = TokenTypeDefaultService
	case `"default-batch"`:
		*t = TokenTypeDefaultBatch
	default:
		return fmt.Errorf("unknown token type %q", s)
	}
	return nil
}

func (t TokenType) String() string {
	switch t {
	case TokenTypeDefault:
		return "default"
	case TokenTypeService:
		return "service"
	case TokenTypeBatch:
		return "batch"
	case TokenTypeDefaultService:
		return "default-service"
	case TokenTypeDefaultBatch:
		return "default-batch"
	default:
		panic("unreachable")
	}
}

// TokenEntry is used to represent a given token
type TokenEntry struct {
	Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""`

	// ID of this entry, generally a random UUID
	ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""`

	// Accessor for this token, a random UUID
	Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""`

	// Parent token, used for revocation trees
	Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""`

	// Which named policies should be used
	Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`

	// Used for audit trails, this is something like "auth/user/login"
	Path string `json:"path" mapstructure:"path" structs:"path"`

	// Used for auditing. This could include things like "source", "user", "ip"
	Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"`

	// Used for operators to be able to associate with the source
	DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`

	// Used to restrict the number of uses (zero is unlimited). This is to
	// support one-time-tokens (generalized). There are a few special values:
	// if it's -1 it has run through its use counts and is executing its final
	// use; if it's -2 it is tainted, which means revocation is currently
	// running on it; and if it's -3 it's also tainted but revocation
	// previously ran and failed, so this hints the tidy function to try it
	// again.
	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`

	// Time of token creation
	CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`

	// Duration set when token was created
	TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`

	// Explicit maximum TTL on the token
	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`

	// If set, the role that was used for parameters at creation time
	Role string `json:"role" mapstructure:"role" structs:"role"`

	// If set, the period of the token. This is only used when created directly
	// through the create endpoint; periods managed by roles or other auth
	// backends are subject to those renewal rules.
	Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`

	// These are the deprecated fields
	DisplayNameDeprecated    string        `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
	NumUsesDeprecated        int           `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
	CreationTimeDeprecated   int64         `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
	ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`

	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`

	// The set of CIDRs that this token can be used with
	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""`

	// NamespaceID is the identifier of the namespace to which this token is
	// confined. Do not return this value over the API when the token is
	// being looked up.
	NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""`

	// CubbyholeID is the identifier of the cubbyhole storage belonging to this
	// token
	CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
}

func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
	if te == nil {
		return nil, nil
	}
	switch key {
	case "policies":
		return te.Policies, nil

	case "path":
		return te.Path, nil

	case "display_name":
		return te.DisplayName, nil

	case "num_uses":
		return te.NumUses, nil

	case "role":
		return te.Role, nil

	case "entity_id":
		return te.EntityID, nil

	case "period":
		return te.Period, nil

	case "period_seconds":
		return int64(te.Period.Seconds()), nil

	case "explicit_max_ttl":
		return te.ExplicitMaxTTL, nil

	case "explicit_max_ttl_seconds":
		return int64(te.ExplicitMaxTTL.Seconds()), nil

	case "creation_ttl":
		return te.TTL, nil

	case "creation_ttl_seconds":
		return int64(te.TTL.Seconds()), nil

	case "creation_time":
		return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil

	case "creation_time_unix":
		return time.Unix(te.CreationTime, 0), nil

	case "meta", "metadata":
		return te.Meta, nil

	case "type":
		teType := te.Type
		switch teType {
		case TokenTypeBatch, TokenTypeService:
		case TokenTypeDefault:
			teType = TokenTypeService
		default:
			return "unknown", nil
		}
		return teType.String(), nil
	}

	return nil, nil
}

func (te *TokenEntry) SentinelKeys() []string {
	return []string{
		"period",
		"period_seconds",
		"explicit_max_ttl",
		"explicit_max_ttl_seconds",
		"creation_ttl",
		"creation_ttl_seconds",
		"creation_time",
		"creation_time_unix",
		"meta",
		"metadata",
		"type",
	}
}
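A sketch of the TokenType JSON round-trip handled by UnmarshalJSON above: the current single-digit encoding and the legacy pre-1.2 string encoding both decode to the same enum.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	var t logical.TokenType

	// Current encoding: a single digit (2 == TokenTypeBatch).
	if err := json.Unmarshal([]byte(`2`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t) // batch

	// Legacy pre-1.2 encoding: a string name.
	if err := json.Unmarshal([]byte(`"default-service"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t) // default-service
}
```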
157
vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
package logical

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

// This logic was pulled from the http package so that it can be used for
// encoding wrapped responses as well. It simply translates the logical
// response to an http response, with the values we want and omitting the
// values we don't.
func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
	httpResp := &HTTPResponse{
		Data:     input.Data,
		Warnings: input.Warnings,
		Headers:  input.Headers,
	}

	if input.Secret != nil {
		httpResp.LeaseID = input.Secret.LeaseID
		httpResp.Renewable = input.Secret.Renewable
		httpResp.LeaseDuration = int(input.Secret.TTL.Seconds())
	}

	// If we have authentication information, then
	// set up the result structure.
	if input.Auth != nil {
		httpResp.Auth = &HTTPAuth{
			ClientToken:      input.Auth.ClientToken,
			Accessor:         input.Auth.Accessor,
			Policies:         input.Auth.Policies,
			TokenPolicies:    input.Auth.TokenPolicies,
			IdentityPolicies: input.Auth.IdentityPolicies,
			Metadata:         input.Auth.Metadata,
			LeaseDuration:    int(input.Auth.TTL.Seconds()),
			Renewable:        input.Auth.Renewable,
			EntityID:         input.Auth.EntityID,
			TokenType:        input.Auth.TokenType.String(),
			Orphan:           input.Auth.Orphan,
		}
	}

	return httpResp
}

func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
	logicalResp := &Response{
		Data:     input.Data,
		Warnings: input.Warnings,
		Headers:  input.Headers,
	}

	if input.LeaseID != "" {
		logicalResp.Secret = &Secret{
			LeaseID: input.LeaseID,
		}
		logicalResp.Secret.Renewable = input.Renewable
		logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration)
	}

	if input.Auth != nil {
		logicalResp.Auth = &Auth{
			ClientToken:      input.Auth.ClientToken,
			Accessor:         input.Auth.Accessor,
			Policies:         input.Auth.Policies,
			TokenPolicies:    input.Auth.TokenPolicies,
			IdentityPolicies: input.Auth.IdentityPolicies,
			Metadata:         input.Auth.Metadata,
			EntityID:         input.Auth.EntityID,
			Orphan:           input.Auth.Orphan,
		}
		logicalResp.Auth.Renewable = input.Auth.Renewable
		logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration)
		switch input.Auth.TokenType {
		case "service":
			logicalResp.Auth.TokenType = TokenTypeService
		case "batch":
			logicalResp.Auth.TokenType = TokenTypeBatch
		}
	}

	return logicalResp
}

type HTTPResponse struct {
	RequestID     string                 `json:"request_id"`
	LeaseID       string                 `json:"lease_id"`
	Renewable     bool                   `json:"renewable"`
	LeaseDuration int                    `json:"lease_duration"`
	Data          map[string]interface{} `json:"data"`
	WrapInfo      *HTTPWrapInfo          `json:"wrap_info"`
	Warnings      []string               `json:"warnings"`
	Headers       map[string][]string    `json:"-"`
	Auth          *HTTPAuth              `json:"auth"`
}

type HTTPAuth struct {
	ClientToken      string            `json:"client_token"`
	Accessor         string            `json:"accessor"`
	Policies         []string          `json:"policies"`
	TokenPolicies    []string          `json:"token_policies,omitempty"`
	IdentityPolicies []string          `json:"identity_policies,omitempty"`
	Metadata         map[string]string `json:"metadata"`
	LeaseDuration    int               `json:"lease_duration"`
	Renewable        bool              `json:"renewable"`
	EntityID         string            `json:"entity_id"`
	TokenType        string            `json:"token_type"`
	Orphan           bool              `json:"orphan"`
}

type HTTPWrapInfo struct {
	Token           string `json:"token"`
	Accessor        string `json:"accessor"`
	TTL             int    `json:"ttl"`
	CreationTime    string `json:"creation_time"`
	CreationPath    string `json:"creation_path"`
	WrappedAccessor string `json:"wrapped_accessor,omitempty"`
}

type HTTPSysInjector struct {
	Response *HTTPResponse
}

func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
	j, err := json.Marshal(h.Response)
	if err != nil {
		return nil, err
	}
	// Fast path no data or empty data
	if h.Response.Data == nil || len(h.Response.Data) == 0 {
		return j, nil
|
||||
}
|
||||
// Marshaling a response will always be a JSON object, meaning it will
|
||||
// always start with '{', so we hijack this to prepend necessary values
|
||||
// Make a guess at the capacity, and write the object opener
|
||||
buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
|
||||
buf.WriteRune('{')
|
||||
for k, v := range h.Response.Data {
|
||||
// Marshal each key/value individually
|
||||
mk, err := json.Marshal(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mv, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Write into the final buffer. We'll never have a valid response
|
||||
// without any fields so we can unconditionally add a comma after each.
|
||||
buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
|
||||
}
|
||||
// Add the rest, without the first '{'
|
||||
buf.Write(j[1:])
|
||||
return buf.Bytes(), nil
|
||||
}
|
261
vendor/github.com/hashicorp/vault/sdk/physical/cache.go
generated
vendored
Normal file
261
vendor/github.com/hashicorp/vault/sdk/physical/cache.go
generated
vendored
Normal file
@ -0,0 +1,261 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/hashicorp/vault/sdk/helper/locksutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/pathmanager"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultCacheSize is used if no cache size is specified for NewCache
|
||||
DefaultCacheSize = 128 * 1024
|
||||
|
||||
// refreshCacheCtxKey is a ctx value that denotes the cache should be
|
||||
// refreshed during a Get call.
|
||||
refreshCacheCtxKey = "refresh_cache"
|
||||
)
|
||||
|
||||
// These paths don't need to be cached by the LRU cache. This should
|
||||
// particularly help memory pressure when unsealing.
|
||||
var cacheExceptionsPaths = []string{
|
||||
"wal/logs/",
|
||||
"index/pages/",
|
||||
"index-dr/pages/",
|
||||
"sys/expire/",
|
||||
"core/poison-pill",
|
||||
"core/raft/tls",
|
||||
"core/license",
|
||||
}
|
||||
|
||||
// CacheRefreshContext returns a context with an added value denoting if the
|
||||
// cache should attempt a refresh.
|
||||
func CacheRefreshContext(ctx context.Context, r bool) context.Context {
|
||||
return context.WithValue(ctx, refreshCacheCtxKey, r)
|
||||
}
|
||||
|
||||
// cacheRefreshFromContext is a helper to look up if the provided context is
|
||||
// requesting a cache refresh.
|
||||
func cacheRefreshFromContext(ctx context.Context) bool {
|
||||
r, ok := ctx.Value(refreshCacheCtxKey).(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Cache is used to wrap an underlying physical backend
|
||||
// and provide an LRU cache layer on top. Most of the reads done by
|
||||
// Vault are for policy objects so there is a large read reduction
|
||||
// by using a simple write-through cache.
|
||||
type Cache struct {
|
||||
backend Backend
|
||||
lru *lru.TwoQueueCache
|
||||
locks []*locksutil.LockEntry
|
||||
logger log.Logger
|
||||
enabled *uint32
|
||||
cacheExceptions *pathmanager.PathManager
|
||||
metricSink metrics.MetricSink
|
||||
}
|
||||
|
||||
// TransactionalCache is a Cache that wraps the physical that is transactional
|
||||
type TransactionalCache struct {
|
||||
*Cache
|
||||
Transactional
|
||||
}
|
||||
|
||||
// Verify Cache satisfies the correct interfaces
|
||||
var (
|
||||
_ ToggleablePurgemonster = (*Cache)(nil)
|
||||
_ ToggleablePurgemonster = (*TransactionalCache)(nil)
|
||||
_ Backend = (*Cache)(nil)
|
||||
_ Transactional = (*TransactionalCache)(nil)
|
||||
)
|
||||
|
||||
// NewCache returns a physical cache of the given size.
|
||||
// If no size is provided, the default size is used.
|
||||
func NewCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *Cache {
|
||||
if logger.IsDebug() {
|
||||
logger.Debug("creating LRU cache", "size", size)
|
||||
}
|
||||
if size <= 0 {
|
||||
size = DefaultCacheSize
|
||||
}
|
||||
|
||||
pm := pathmanager.New()
|
||||
pm.AddPaths(cacheExceptionsPaths)
|
||||
|
||||
cache, _ := lru.New2Q(size)
|
||||
c := &Cache{
|
||||
backend: b,
|
||||
lru: cache,
|
||||
locks: locksutil.CreateLocks(),
|
||||
logger: logger,
|
||||
// This fails safe.
|
||||
enabled: new(uint32),
|
||||
cacheExceptions: pm,
|
||||
metricSink: metricSink,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func NewTransactionalCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *TransactionalCache {
|
||||
c := &TransactionalCache{
|
||||
Cache: NewCache(b, size, logger, metricSink),
|
||||
Transactional: b.(Transactional),
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Cache) ShouldCache(key string) bool {
|
||||
if atomic.LoadUint32(c.enabled) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return !c.cacheExceptions.HasPath(key)
|
||||
}
|
||||
|
||||
// SetEnabled is used to toggle whether the cache is on or off. It must be
|
||||
// called with true to actually activate the cache after creation.
|
||||
func (c *Cache) SetEnabled(enabled bool) {
|
||||
if enabled {
|
||||
atomic.StoreUint32(c.enabled, 1)
|
||||
return
|
||||
}
|
||||
atomic.StoreUint32(c.enabled, 0)
|
||||
}
|
||||
|
||||
// Purge is used to clear the cache
|
||||
func (c *Cache) Purge(ctx context.Context) {
|
||||
// Lock the world
|
||||
for _, lock := range c.locks {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
}
|
||||
|
||||
c.lru.Purge()
|
||||
}
|
||||
|
||||
func (c *Cache) Put(ctx context.Context, entry *Entry) error {
|
||||
if entry != nil && !c.ShouldCache(entry.Key) {
|
||||
return c.backend.Put(ctx, entry)
|
||||
}
|
||||
|
||||
lock := locksutil.LockForKey(c.locks, entry.Key)
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
err := c.backend.Put(ctx, entry)
|
||||
if err == nil {
|
||||
c.lru.Add(entry.Key, entry)
|
||||
c.metricSink.IncrCounter([]string{"cache", "write"}, 1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
|
||||
if !c.ShouldCache(key) {
|
||||
return c.backend.Get(ctx, key)
|
||||
}
|
||||
|
||||
lock := locksutil.LockForKey(c.locks, key)
|
||||
lock.RLock()
|
||||
defer lock.RUnlock()
|
||||
|
||||
// Check the LRU first
|
||||
if !cacheRefreshFromContext(ctx) {
|
||||
if raw, ok := c.lru.Get(key); ok {
|
||||
if raw == nil {
|
||||
return nil, nil
|
||||
}
|
||||
c.metricSink.IncrCounter([]string{"cache", "hit"}, 1)
|
||||
return raw.(*Entry), nil
|
||||
}
|
||||
}
|
||||
|
||||
c.metricSink.IncrCounter([]string{"cache", "miss"}, 1)
|
||||
// Read from the underlying backend
|
||||
ent, err := c.backend.Get(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Cache the result
|
||||
c.lru.Add(key, ent)
|
||||
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
func (c *Cache) Delete(ctx context.Context, key string) error {
|
||||
if !c.ShouldCache(key) {
|
||||
return c.backend.Delete(ctx, key)
|
||||
}
|
||||
|
||||
lock := locksutil.LockForKey(c.locks, key)
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
err := c.backend.Delete(ctx, key)
|
||||
if err == nil {
|
||||
c.lru.Remove(key)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
// Always pass-through as this would be difficult to cache. For the same
|
||||
// reason we don't lock as we can't reasonably know which locks to readlock
|
||||
// ahead of time.
|
||||
return c.backend.List(ctx, prefix)
|
||||
}
|
||||
|
||||
func (c *TransactionalCache) Locks() []*locksutil.LockEntry {
|
||||
return c.locks
|
||||
}
|
||||
|
||||
func (c *TransactionalCache) LRU() *lru.TwoQueueCache {
|
||||
return c.lru
|
||||
}
|
||||
|
||||
func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error {
|
||||
// Bypass the locking below
|
||||
if atomic.LoadUint32(c.enabled) == 0 {
|
||||
return c.Transactional.Transaction(ctx, txns)
|
||||
}
|
||||
|
||||
// Collect keys that need to be locked
|
||||
var keys []string
|
||||
for _, curr := range txns {
|
||||
keys = append(keys, curr.Entry.Key)
|
||||
}
|
||||
// Lock the keys
|
||||
for _, l := range locksutil.LocksForKeys(c.locks, keys) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
}
|
||||
|
||||
if err := c.Transactional.Transaction(ctx, txns); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, txn := range txns {
|
||||
if !c.ShouldCache(txn.Entry.Key) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch txn.Operation {
|
||||
case PutOperation:
|
||||
c.lru.Add(txn.Entry.Key, txn.Entry)
|
||||
c.metricSink.IncrCounter([]string{"cache", "write"}, 1)
|
||||
case DeleteOperation:
|
||||
c.lru.Remove(txn.Entry.Key)
|
||||
c.metricSink.IncrCounter([]string{"cache", "delete"}, 1)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
108
vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
generated
vendored
Normal file
108
vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters")
|
||||
ErrNonPrintable = errors.New("key contains non-printable characters")
|
||||
)
|
||||
|
||||
// StorageEncoding is used to add errors into underlying physical requests
|
||||
type StorageEncoding struct {
|
||||
Backend
|
||||
}
|
||||
|
||||
// TransactionalStorageEncoding is the transactional version of the error
|
||||
// injector
|
||||
type TransactionalStorageEncoding struct {
|
||||
*StorageEncoding
|
||||
Transactional
|
||||
}
|
||||
|
||||
// Verify StorageEncoding satisfies the correct interfaces
|
||||
var (
|
||||
_ Backend = (*StorageEncoding)(nil)
|
||||
_ Transactional = (*TransactionalStorageEncoding)(nil)
|
||||
)
|
||||
|
||||
// NewStorageEncoding returns a wrapped physical backend and verifies the key
|
||||
// encoding
|
||||
func NewStorageEncoding(b Backend) Backend {
|
||||
enc := &StorageEncoding{
|
||||
Backend: b,
|
||||
}
|
||||
|
||||
if bTxn, ok := b.(Transactional); ok {
|
||||
return &TransactionalStorageEncoding{
|
||||
StorageEncoding: enc,
|
||||
Transactional: bTxn,
|
||||
}
|
||||
}
|
||||
|
||||
return enc
|
||||
}
|
||||
|
||||
func (e *StorageEncoding) containsNonPrintableChars(key string) bool {
|
||||
idx := strings.IndexFunc(key, func(c rune) bool {
|
||||
return !unicode.IsPrint(c)
|
||||
})
|
||||
|
||||
return idx != -1
|
||||
}
|
||||
|
||||
func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error {
|
||||
if !utf8.ValidString(entry.Key) {
|
||||
return ErrNonUTF8
|
||||
}
|
||||
|
||||
if e.containsNonPrintableChars(entry.Key) {
|
||||
return ErrNonPrintable
|
||||
}
|
||||
|
||||
return e.Backend.Put(ctx, entry)
|
||||
}
|
||||
|
||||
func (e *StorageEncoding) Delete(ctx context.Context, key string) error {
|
||||
if !utf8.ValidString(key) {
|
||||
return ErrNonUTF8
|
||||
}
|
||||
|
||||
if e.containsNonPrintableChars(key) {
|
||||
return ErrNonPrintable
|
||||
}
|
||||
|
||||
return e.Backend.Delete(ctx, key)
|
||||
}
|
||||
|
||||
func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error {
|
||||
for _, txn := range txns {
|
||||
if !utf8.ValidString(txn.Entry.Key) {
|
||||
return ErrNonUTF8
|
||||
}
|
||||
|
||||
if e.containsNonPrintableChars(txn.Entry.Key) {
|
||||
return ErrNonPrintable
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return e.Transactional.Transaction(ctx, txns)
|
||||
}
|
||||
|
||||
func (e *StorageEncoding) Purge(ctx context.Context) {
|
||||
if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
|
||||
purgeable.Purge(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *StorageEncoding) SetEnabled(enabled bool) {
|
||||
if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
|
||||
purgeable.SetEnabled(enabled)
|
||||
}
|
||||
}
|
11
vendor/github.com/hashicorp/vault/sdk/physical/entry.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/vault/sdk/physical/entry.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
package physical
|
||||
|
||||
// Entry is used to represent data stored by the physical backend
|
||||
type Entry struct {
|
||||
Key string
|
||||
Value []byte
|
||||
SealWrap bool `json:"seal_wrap,omitempty"`
|
||||
|
||||
// Only used in replication
|
||||
ValueHash []byte
|
||||
}
|
110
vendor/github.com/hashicorp/vault/sdk/physical/error.go
generated
vendored
Normal file
110
vendor/github.com/hashicorp/vault/sdk/physical/error.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultErrorPercent is used to determin how often we error
|
||||
DefaultErrorPercent = 20
|
||||
)
|
||||
|
||||
// ErrorInjector is used to add errors into underlying physical requests
|
||||
type ErrorInjector struct {
|
||||
backend Backend
|
||||
errorPercent int
|
||||
randomLock *sync.Mutex
|
||||
random *rand.Rand
|
||||
}
|
||||
|
||||
// TransactionalErrorInjector is the transactional version of the error
|
||||
// injector
|
||||
type TransactionalErrorInjector struct {
|
||||
*ErrorInjector
|
||||
Transactional
|
||||
}
|
||||
|
||||
// Verify ErrorInjector satisfies the correct interfaces
|
||||
var (
|
||||
_ Backend = (*ErrorInjector)(nil)
|
||||
_ Transactional = (*TransactionalErrorInjector)(nil)
|
||||
)
|
||||
|
||||
// NewErrorInjector returns a wrapped physical backend to inject error
|
||||
func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector {
|
||||
if errorPercent < 0 || errorPercent > 100 {
|
||||
errorPercent = DefaultErrorPercent
|
||||
}
|
||||
logger.Info("creating error injector")
|
||||
|
||||
return &ErrorInjector{
|
||||
backend: b,
|
||||
errorPercent: errorPercent,
|
||||
randomLock: new(sync.Mutex),
|
||||
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
}
|
||||
}
|
||||
|
||||
// NewTransactionalErrorInjector creates a new transactional ErrorInjector
|
||||
func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector {
|
||||
return &TransactionalErrorInjector{
|
||||
ErrorInjector: NewErrorInjector(b, errorPercent, logger),
|
||||
Transactional: b.(Transactional),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) SetErrorPercentage(p int) {
|
||||
e.errorPercent = p
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) addError() error {
|
||||
e.randomLock.Lock()
|
||||
roll := e.random.Intn(100)
|
||||
e.randomLock.Unlock()
|
||||
if roll < e.errorPercent {
|
||||
return errors.New("random error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error {
|
||||
if err := e.addError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return e.backend.Put(ctx, entry)
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) {
|
||||
if err := e.addError(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e.backend.Get(ctx, key)
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) Delete(ctx context.Context, key string) error {
|
||||
if err := e.addError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return e.backend.Delete(ctx, key)
|
||||
}
|
||||
|
||||
func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
if err := e.addError(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e.backend.List(ctx, prefix)
|
||||
}
|
||||
|
||||
func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
|
||||
if err := e.addError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return e.Transactional.Transaction(ctx, txns)
|
||||
}
|
292
vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go
generated
vendored
Normal file
292
vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
package inmem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
|
||||
radix "github.com/armon/go-radix"
|
||||
)
|
||||
|
||||
// Verify interfaces are satisfied
|
||||
var (
|
||||
_ physical.Backend = (*InmemBackend)(nil)
|
||||
_ physical.HABackend = (*InmemHABackend)(nil)
|
||||
_ physical.HABackend = (*TransactionalInmemHABackend)(nil)
|
||||
_ physical.Lock = (*InmemLock)(nil)
|
||||
_ physical.Transactional = (*TransactionalInmemBackend)(nil)
|
||||
_ physical.Transactional = (*TransactionalInmemHABackend)(nil)
|
||||
)
|
||||
|
||||
var (
|
||||
PutDisabledError = errors.New("put operations disabled in inmem backend")
|
||||
GetDisabledError = errors.New("get operations disabled in inmem backend")
|
||||
DeleteDisabledError = errors.New("delete operations disabled in inmem backend")
|
||||
ListDisabledError = errors.New("list operations disabled in inmem backend")
|
||||
)
|
||||
|
||||
// InmemBackend is an in-memory only physical backend. It is useful
|
||||
// for testing and development situations where the data is not
|
||||
// expected to be durable.
|
||||
type InmemBackend struct {
|
||||
sync.RWMutex
|
||||
root *radix.Tree
|
||||
permitPool *physical.PermitPool
|
||||
logger log.Logger
|
||||
failGet *uint32
|
||||
failPut *uint32
|
||||
failDelete *uint32
|
||||
failList *uint32
|
||||
logOps bool
|
||||
maxValueSize int
|
||||
}
|
||||
|
||||
type TransactionalInmemBackend struct {
|
||||
InmemBackend
|
||||
}
|
||||
|
||||
// NewInmem constructs a new in-memory backend
|
||||
func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
maxValueSize := 0
|
||||
maxValueSizeStr, ok := conf["max_value_size"]
|
||||
if ok {
|
||||
var err error
|
||||
maxValueSize, err = strconv.Atoi(maxValueSizeStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &InmemBackend{
|
||||
root: radix.New(),
|
||||
permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
|
||||
logger: logger,
|
||||
failGet: new(uint32),
|
||||
failPut: new(uint32),
|
||||
failDelete: new(uint32),
|
||||
failList: new(uint32),
|
||||
logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
|
||||
maxValueSize: maxValueSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Basically for now just creates a permit pool of size 1 so only one operation
|
||||
// can run at a time
|
||||
func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
maxValueSize := 0
|
||||
maxValueSizeStr, ok := conf["max_value_size"]
|
||||
if ok {
|
||||
var err error
|
||||
maxValueSize, err = strconv.Atoi(maxValueSizeStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &TransactionalInmemBackend{
|
||||
InmemBackend: InmemBackend{
|
||||
root: radix.New(),
|
||||
permitPool: physical.NewPermitPool(1),
|
||||
logger: logger,
|
||||
failGet: new(uint32),
|
||||
failPut: new(uint32),
|
||||
failDelete: new(uint32),
|
||||
failList: new(uint32),
|
||||
logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
|
||||
maxValueSize: maxValueSize,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put is used to insert or update an entry
|
||||
func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error {
|
||||
i.permitPool.Acquire()
|
||||
defer i.permitPool.Release()
|
||||
|
||||
i.Lock()
|
||||
defer i.Unlock()
|
||||
|
||||
return i.PutInternal(ctx, entry)
|
||||
}
|
||||
|
||||
func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error {
|
||||
if i.logOps {
|
||||
i.logger.Trace("put", "key", entry.Key)
|
||||
}
|
||||
if atomic.LoadUint32(i.failPut) != 0 {
|
||||
return PutDisabledError
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize {
|
||||
return fmt.Errorf("%s", physical.ErrValueTooLarge)
|
||||
}
|
||||
|
||||
i.root.Insert(entry.Key, entry.Value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *InmemBackend) FailPut(fail bool) {
|
||||
var val uint32
|
||||
if fail {
|
||||
val = 1
|
||||
}
|
||||
atomic.StoreUint32(i.failPut, val)
|
||||
}
|
||||
|
||||
// Get is used to fetch an entry
|
||||
func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
|
||||
i.permitPool.Acquire()
|
||||
defer i.permitPool.Release()
|
||||
|
||||
i.RLock()
|
||||
defer i.RUnlock()
|
||||
|
||||
return i.GetInternal(ctx, key)
|
||||
}
|
||||
|
||||
func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) {
|
||||
if i.logOps {
|
||||
i.logger.Trace("get", "key", key)
|
||||
}
|
||||
if atomic.LoadUint32(i.failGet) != 0 {
|
||||
return nil, GetDisabledError
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
if raw, ok := i.root.Get(key); ok {
|
||||
return &physical.Entry{
|
||||
Key: key,
|
||||
Value: raw.([]byte),
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (i *InmemBackend) FailGet(fail bool) {
|
||||
var val uint32
|
||||
if fail {
|
||||
val = 1
|
||||
}
|
||||
atomic.StoreUint32(i.failGet, val)
|
||||
}
|
||||
|
||||
// Delete is used to permanently delete an entry
|
||||
func (i *InmemBackend) Delete(ctx context.Context, key string) error {
|
||||
i.permitPool.Acquire()
|
||||
defer i.permitPool.Release()
|
||||
|
||||
i.Lock()
|
||||
defer i.Unlock()
|
||||
|
||||
return i.DeleteInternal(ctx, key)
|
||||
}
|
||||
|
||||
func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error {
|
||||
if i.logOps {
|
||||
i.logger.Trace("delete", "key", key)
|
||||
}
|
||||
if atomic.LoadUint32(i.failDelete) != 0 {
|
||||
return DeleteDisabledError
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
i.root.Delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *InmemBackend) FailDelete(fail bool) {
|
||||
var val uint32
|
||||
if fail {
|
||||
val = 1
|
||||
}
|
||||
atomic.StoreUint32(i.failDelete, val)
|
||||
}
|
||||
|
||||
// List is used to list all the keys under a given
|
||||
// prefix, up to the next prefix.
|
||||
func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
i.permitPool.Acquire()
|
||||
defer i.permitPool.Release()
|
||||
|
||||
i.RLock()
|
||||
defer i.RUnlock()
|
||||
|
||||
return i.ListInternal(ctx, prefix)
|
||||
}
|
||||
|
||||
func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) {
|
||||
if i.logOps {
|
||||
i.logger.Trace("list", "prefix", prefix)
|
||||
}
|
||||
if atomic.LoadUint32(i.failList) != 0 {
|
||||
return nil, ListDisabledError
|
||||
}
|
||||
|
||||
var out []string
|
||||
seen := make(map[string]interface{})
|
||||
walkFn := func(s string, v interface{}) bool {
|
||||
trimmed := strings.TrimPrefix(s, prefix)
|
||||
sep := strings.Index(trimmed, "/")
|
||||
if sep == -1 {
|
||||
out = append(out, trimmed)
|
||||
} else {
|
||||
trimmed = trimmed[:sep+1]
|
||||
if _, ok := seen[trimmed]; !ok {
|
||||
out = append(out, trimmed)
|
||||
seen[trimmed] = struct{}{}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
i.root.WalkPrefix(prefix, walkFn)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (i *InmemBackend) FailList(fail bool) {
|
||||
var val uint32
|
||||
if fail {
|
||||
val = 1
|
||||
}
|
||||
atomic.StoreUint32(i.failList, val)
|
||||
}
|
||||
|
||||
// Implements the transaction interface
|
||||
func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
|
||||
t.permitPool.Acquire()
|
||||
defer t.permitPool.Release()
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
return physical.GenericTransactionHandler(ctx, t, txns)
|
||||
}
|
167
vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go
generated
vendored
Normal file
167
vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
package inmem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
)
|
||||
|
||||
type InmemHABackend struct {
|
||||
physical.Backend
|
||||
locks map[string]string
|
||||
l *sync.Mutex
|
||||
cond *sync.Cond
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
type TransactionalInmemHABackend struct {
|
||||
physical.Transactional
|
||||
InmemHABackend
|
||||
}
|
||||
|
||||
// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
|
||||
func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
be, err := NewInmem(nil, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
in := &InmemHABackend{
|
||||
Backend: be,
|
||||
locks: make(map[string]string),
|
||||
logger: logger,
|
||||
l: new(sync.Mutex),
|
||||
}
|
||||
in.cond = sync.NewCond(in.l)
|
||||
return in, nil
|
||||
}
|
||||
|
||||
func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
transInmem, err := NewTransactionalInmem(nil, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inmemHA := InmemHABackend{
|
||||
Backend: transInmem,
|
||||
locks: make(map[string]string),
|
||||
logger: logger,
|
||||
l: new(sync.Mutex),
|
||||
}
|
||||
|
||||
in := &TransactionalInmemHABackend{
|
||||
InmemHABackend: inmemHA,
|
||||
Transactional: transInmem.(physical.Transactional),
|
||||
}
|
||||
in.cond = sync.NewCond(in.l)
|
||||
return in, nil
|
||||
}
|
||||
|
||||
// LockWith is used for mutual exclusion based on the given key.
|
||||
func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) {
|
||||
l := &InmemLock{
|
||||
in: i,
|
||||
key: key,
|
||||
value: value,
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// LockMapSize is used in some tests to determine whether this backend has ever
|
||||
// been used for HA purposes rather than simply for storage
|
||||
func (i *InmemHABackend) LockMapSize() int {
|
||||
return len(i.locks)
|
||||
}
|
||||
|
||||
// HAEnabled indicates whether the HA functionality should be exposed.
|
||||
// Currently always returns true.
|
||||
func (i *InmemHABackend) HAEnabled() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// InmemLock is an in-memory Lock implementation for the HABackend
|
||||
type InmemLock struct {
|
||||
in *InmemHABackend
|
||||
key string
|
||||
value string
|
||||
|
||||
held bool
|
||||
leaderCh chan struct{}
|
||||
l sync.Mutex
|
||||
}
|
||||
|
||||
func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||
i.l.Lock()
|
||||
defer i.l.Unlock()
|
||||
if i.held {
|
||||
return nil, fmt.Errorf("lock already held")
|
||||
}
|
||||
|
||||
// Attempt an async acquisition
|
||||
didLock := make(chan struct{})
|
||||
releaseCh := make(chan bool, 1)
|
||||
go func() {
|
||||
// Wait to acquire the lock
|
||||
i.in.l.Lock()
|
||||
_, ok := i.in.locks[i.key]
|
||||
for ok {
|
||||
i.in.cond.Wait()
|
||||
_, ok = i.in.locks[i.key]
|
||||
}
|
||||
i.in.locks[i.key] = i.value
|
||||
i.in.l.Unlock()
|
||||
|
||||
// Signal that lock is held
|
||||
close(didLock)
|
||||
|
||||
// Handle an early abort
|
||||
release := <-releaseCh
|
||||
if release {
|
||||
i.in.l.Lock()
|
||||
delete(i.in.locks, i.key)
|
||||
i.in.l.Unlock()
|
||||
i.in.cond.Broadcast()
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for lock acquisition or shutdown
|
||||
select {
|
||||
case <-didLock:
|
||||
releaseCh <- false
|
||||
case <-stopCh:
|
||||
releaseCh <- true
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Create the leader channel
|
||||
i.held = true
|
||||
i.leaderCh = make(chan struct{})
|
||||
return i.leaderCh, nil
|
||||
}
|
||||
|
||||
func (i *InmemLock) Unlock() error {
|
||||
i.l.Lock()
|
||||
defer i.l.Unlock()
|
||||
|
||||
if !i.held {
|
||||
return nil
|
||||
}
|
||||
|
||||
close(i.leaderCh)
|
||||
i.leaderCh = nil
|
||||
i.held = false
|
||||
|
||||
i.in.l.Lock()
|
||||
delete(i.in.locks, i.key)
|
||||
i.in.l.Unlock()
|
||||
i.in.cond.Broadcast()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *InmemLock) Value() (bool, string, error) {
|
||||
i.in.l.Lock()
|
||||
val, ok := i.in.locks[i.key]
|
||||
i.in.l.Unlock()
|
||||
return ok, val, nil
|
||||
}
|
113
vendor/github.com/hashicorp/vault/sdk/physical/latency.go
generated
vendored
Normal file
113
vendor/github.com/hashicorp/vault/sdk/physical/latency.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
uberAtomic "go.uber.org/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultJitterPercent is used if no cache size is specified for NewCache
|
||||
DefaultJitterPercent = 20
|
||||
)
|
||||
|
||||
// LatencyInjector is used to add latency into underlying physical requests
|
||||
type LatencyInjector struct {
|
||||
logger log.Logger
|
||||
backend Backend
|
||||
latency *uberAtomic.Duration
|
||||
jitterPercent int
|
||||
randomLock *sync.Mutex
|
||||
random *rand.Rand
|
||||
}
|
||||
|
||||
// TransactionalLatencyInjector is the transactional version of the latency
|
||||
// injector
|
||||
type TransactionalLatencyInjector struct {
|
||||
*LatencyInjector
|
||||
Transactional
|
||||
}
|
||||
|
||||
// Verify LatencyInjector satisfies the correct interfaces
|
||||
var (
|
||||
_ Backend = (*LatencyInjector)(nil)
|
||||
_ Transactional = (*TransactionalLatencyInjector)(nil)
|
||||
)
|
||||
|
||||
// NewLatencyInjector returns a wrapped physical backend to simulate latency
|
||||
func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
|
||||
if jitter < 0 || jitter > 100 {
|
||||
jitter = DefaultJitterPercent
|
||||
}
|
||||
logger.Info("creating latency injector")
|
||||
|
||||
return &LatencyInjector{
|
||||
logger: logger,
|
||||
backend: b,
|
||||
latency: uberAtomic.NewDuration(latency),
|
||||
jitterPercent: jitter,
|
||||
randomLock: new(sync.Mutex),
|
||||
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
}
|
||||
}
|
||||
|
||||
// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
|
||||
func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
|
||||
return &TransactionalLatencyInjector{
|
||||
LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
|
||||
Transactional: b.(Transactional),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LatencyInjector) SetLatency(latency time.Duration) {
|
||||
l.logger.Info("Changing backend latency", "latency", latency)
|
||||
l.latency.Store(latency)
|
||||
}
|
||||
|
||||
func (l *LatencyInjector) addLatency() {
|
||||
// Calculate a value between 1 +- jitter%
|
||||
percent := 100
|
||||
if l.jitterPercent > 0 {
|
||||
min := 100 - l.jitterPercent
|
||||
max := 100 + l.jitterPercent
|
||||
l.randomLock.Lock()
|
||||
percent = l.random.Intn(max-min) + min
|
||||
l.randomLock.Unlock()
|
||||
}
|
||||
latencyDuration := time.Duration(int(l.latency.Load()) * percent / 100)
|
||||
time.Sleep(latencyDuration)
|
||||
}
|
||||
|
||||
// Put is a latent put request
|
||||
func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error {
|
||||
l.addLatency()
|
||||
return l.backend.Put(ctx, entry)
|
||||
}
|
||||
|
||||
// Get is a latent get request
|
||||
func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) {
|
||||
l.addLatency()
|
||||
return l.backend.Get(ctx, key)
|
||||
}
|
||||
|
||||
// Delete is a latent delete request
|
||||
func (l *LatencyInjector) Delete(ctx context.Context, key string) error {
|
||||
l.addLatency()
|
||||
return l.backend.Delete(ctx, key)
|
||||
}
|
||||
|
||||
// List is a latent list request
|
||||
func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
l.addLatency()
|
||||
return l.backend.List(ctx, prefix)
|
||||
}
|
||||
|
||||
// Transaction is a latent transaction request
|
||||
func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
|
||||
l.addLatency()
|
||||
return l.Transactional.Transaction(ctx, txns)
|
||||
}
|
133
vendor/github.com/hashicorp/vault/sdk/physical/physical.go
generated
vendored
Normal file
133
vendor/github.com/hashicorp/vault/sdk/physical/physical.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
)
|
||||
|
||||
const DefaultParallelOperations = 128
|
||||
|
||||
// The operation type
|
||||
type Operation string
|
||||
|
||||
const (
|
||||
DeleteOperation Operation = "delete"
|
||||
GetOperation = "get"
|
||||
ListOperation = "list"
|
||||
PutOperation = "put"
|
||||
)
|
||||
|
||||
const (
|
||||
ErrValueTooLarge = "put failed due to value being too large"
|
||||
)
|
||||
|
||||
// Backend is the interface required for a physical
|
||||
// backend. A physical backend is used to durably store
|
||||
// data outside of Vault. As such, it is completely untrusted,
|
||||
// and is only accessed via a security barrier. The backends
|
||||
// must represent keys in a hierarchical manner. All methods
|
||||
// are expected to be thread safe.
|
||||
type Backend interface {
|
||||
// Put is used to insert or update an entry
|
||||
Put(ctx context.Context, entry *Entry) error
|
||||
|
||||
// Get is used to fetch an entry
|
||||
Get(ctx context.Context, key string) (*Entry, error)
|
||||
|
||||
// Delete is used to permanently delete an entry
|
||||
Delete(ctx context.Context, key string) error
|
||||
|
||||
// List is used to list all the keys under a given
|
||||
// prefix, up to the next prefix.
|
||||
List(ctx context.Context, prefix string) ([]string, error)
|
||||
}
|
||||
|
||||
// HABackend is an extensions to the standard physical
|
||||
// backend to support high-availability. Vault only expects to
|
||||
// use mutual exclusion to allow multiple instances to act as a
|
||||
// hot standby for a leader that services all requests.
|
||||
type HABackend interface {
|
||||
// LockWith is used for mutual exclusion based on the given key.
|
||||
LockWith(key, value string) (Lock, error)
|
||||
|
||||
// Whether or not HA functionality is enabled
|
||||
HAEnabled() bool
|
||||
}
|
||||
|
||||
// ToggleablePurgemonster is an interface for backends that can toggle on or
|
||||
// off special functionality and/or support purging. This is only used for the
|
||||
// cache, don't use it for other things.
|
||||
type ToggleablePurgemonster interface {
|
||||
Purge(ctx context.Context)
|
||||
SetEnabled(bool)
|
||||
}
|
||||
|
||||
// RedirectDetect is an optional interface that an HABackend
|
||||
// can implement. If they do, a redirect address can be automatically
|
||||
// detected.
|
||||
type RedirectDetect interface {
|
||||
// DetectHostAddr is used to detect the host address
|
||||
DetectHostAddr() (string, error)
|
||||
}
|
||||
|
||||
type Lock interface {
|
||||
// Lock is used to acquire the given lock
|
||||
// The stopCh is optional and if closed should interrupt the lock
|
||||
// acquisition attempt. The return struct should be closed when
|
||||
// leadership is lost.
|
||||
Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
|
||||
|
||||
// Unlock is used to release the lock
|
||||
Unlock() error
|
||||
|
||||
// Returns the value of the lock and if it is held
|
||||
Value() (bool, string, error)
|
||||
}
|
||||
|
||||
// Factory is the factory function to create a physical backend.
|
||||
type Factory func(config map[string]string, logger log.Logger) (Backend, error)
|
||||
|
||||
// PermitPool is used to limit maximum outstanding requests
|
||||
type PermitPool struct {
|
||||
sem chan int
|
||||
}
|
||||
|
||||
// NewPermitPool returns a new permit pool with the provided
|
||||
// number of permits
|
||||
func NewPermitPool(permits int) *PermitPool {
|
||||
if permits < 1 {
|
||||
permits = DefaultParallelOperations
|
||||
}
|
||||
return &PermitPool{
|
||||
sem: make(chan int, permits),
|
||||
}
|
||||
}
|
||||
|
||||
// Acquire returns when a permit has been acquired
|
||||
func (c *PermitPool) Acquire() {
|
||||
c.sem <- 1
|
||||
}
|
||||
|
||||
// Release returns a permit to the pool
|
||||
func (c *PermitPool) Release() {
|
||||
<-c.sem
|
||||
}
|
||||
|
||||
// Get number of requests in the permit pool
|
||||
func (c *PermitPool) CurrentPermits() int {
|
||||
return len(c.sem)
|
||||
}
|
||||
|
||||
// Prefixes is a shared helper function returns all parent 'folders' for a
|
||||
// given vault key.
|
||||
// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
|
||||
func Prefixes(s string) []string {
|
||||
components := strings.Split(s, "/")
|
||||
result := []string{}
|
||||
for i := 1; i < len(components); i++ {
|
||||
result = append(result, strings.Join(components[:i], "/"))
|
||||
}
|
||||
return result
|
||||
}
|
40
vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go
generated
vendored
Normal file
40
vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// PhysicalAccess is a wrapper around physical.Backend that allows Core to
|
||||
// expose its physical storage operations through PhysicalAccess() while
|
||||
// restricting the ability to modify Core.physical itself.
|
||||
type PhysicalAccess struct {
|
||||
physical Backend
|
||||
}
|
||||
|
||||
var _ Backend = (*PhysicalAccess)(nil)
|
||||
|
||||
func NewPhysicalAccess(physical Backend) *PhysicalAccess {
|
||||
return &PhysicalAccess{physical: physical}
|
||||
}
|
||||
|
||||
func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error {
|
||||
return p.physical.Put(ctx, entry)
|
||||
}
|
||||
|
||||
func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) {
|
||||
return p.physical.Get(ctx, key)
|
||||
}
|
||||
|
||||
func (p *PhysicalAccess) Delete(ctx context.Context, key string) error {
|
||||
return p.physical.Delete(ctx, key)
|
||||
}
|
||||
|
||||
func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
return p.physical.List(ctx, prefix)
|
||||
}
|
||||
|
||||
func (p *PhysicalAccess) Purge(ctx context.Context) {
|
||||
if purgeable, ok := p.physical.(ToggleablePurgemonster); ok {
|
||||
purgeable.Purge(ctx)
|
||||
}
|
||||
}
|
94
vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go
generated
vendored
Normal file
94
vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var ErrRelativePath = errors.New("relative paths not supported")
|
||||
|
||||
// View represents a prefixed view of a physical backend
|
||||
type View struct {
|
||||
backend Backend
|
||||
prefix string
|
||||
}
|
||||
|
||||
// Verify View satisfies the correct interfaces
|
||||
var _ Backend = (*View)(nil)
|
||||
|
||||
// NewView takes an underlying physical backend and returns
|
||||
// a view of it that can only operate with the given prefix.
|
||||
func NewView(backend Backend, prefix string) *View {
|
||||
return &View{
|
||||
backend: backend,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
|
||||
|
||||
// List the contents of the prefixed view
|
||||
func (v *View) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
if err := v.sanityCheck(prefix); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v.backend.List(ctx, v.expandKey(prefix))
|
||||
}
|
||||
|
||||
// Get the key of the prefixed view
|
||||
func (v *View) Get(ctx context.Context, key string) (*Entry, error) {
|
||||
if err := v.sanityCheck(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry, err := v.backend.Get(ctx, v.expandKey(key))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
entry.Key = v.truncateKey(entry.Key)
|
||||
|
||||
return &Entry{
|
||||
Key: entry.Key,
|
||||
Value: entry.Value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put the entry into the prefix view
|
||||
func (v *View) Put(ctx context.Context, entry *Entry) error {
|
||||
if err := v.sanityCheck(entry.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nested := &Entry{
|
||||
Key: v.expandKey(entry.Key),
|
||||
Value: entry.Value,
|
||||
}
|
||||
return v.backend.Put(ctx, nested)
|
||||
}
|
||||
|
||||
// Delete the entry from the prefix view
|
||||
func (v *View) Delete(ctx context.Context, key string) error {
|
||||
if err := v.sanityCheck(key); err != nil {
|
||||
return err
|
||||
}
|
||||
return v.backend.Delete(ctx, v.expandKey(key))
|
||||
}
|
||||
|
||||
// sanityCheck is used to perform a sanity check on a key
|
||||
func (v *View) sanityCheck(key string) error {
|
||||
if strings.Contains(key, "..") {
|
||||
return ErrRelativePath
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expandKey is used to expand to the full key path with the prefix
|
||||
func (v *View) expandKey(suffix string) string {
|
||||
return v.prefix + suffix
|
||||
}
|
||||
|
||||
// truncateKey is used to remove the prefix of the key
|
||||
func (v *View) truncateKey(full string) string {
|
||||
return strings.TrimPrefix(full, v.prefix)
|
||||
}
|
497
vendor/github.com/hashicorp/vault/sdk/physical/testing.go
generated
vendored
Normal file
497
vendor/github.com/hashicorp/vault/sdk/physical/testing.go
generated
vendored
Normal file
@ -0,0 +1,497 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ExerciseBackend(t testing.TB, b Backend) {
|
||||
t.Helper()
|
||||
|
||||
// Should be empty
|
||||
keys, err := b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("initial list failed: %v", err)
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
t.Errorf("initial not empty: %v", keys)
|
||||
}
|
||||
|
||||
// Delete should work if it does not exist
|
||||
err = b.Delete(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("idempotent delete: %v", err)
|
||||
}
|
||||
|
||||
// Get should not fail, but be nil
|
||||
out, err := b.Get(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("initial get failed: %v", err)
|
||||
}
|
||||
if out != nil {
|
||||
t.Errorf("initial get was not nil: %v", out)
|
||||
}
|
||||
|
||||
// Make an entry
|
||||
e := &Entry{Key: "foo", Value: []byte("test")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("put failed: %v", err)
|
||||
}
|
||||
|
||||
// Get should work
|
||||
out, err = b.Get(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("get failed: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(out, e) {
|
||||
t.Errorf("bad: %v expected: %v", out, e)
|
||||
}
|
||||
|
||||
// List should not be empty
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("list failed: %v", err)
|
||||
}
|
||||
if len(keys) != 1 || keys[0] != "foo" {
|
||||
t.Errorf("keys[0] did not equal foo: %v", keys)
|
||||
}
|
||||
|
||||
// Delete should work
|
||||
err = b.Delete(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("delete: %v", err)
|
||||
}
|
||||
|
||||
// Should be empty
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("list after delete: %v", err)
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
t.Errorf("list after delete not empty: %v", keys)
|
||||
}
|
||||
|
||||
// Get should fail
|
||||
out, err = b.Get(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("get after delete: %v", err)
|
||||
}
|
||||
if out != nil {
|
||||
t.Errorf("get after delete not nil: %v", out)
|
||||
}
|
||||
|
||||
// Multiple Puts should work; GH-189
|
||||
e = &Entry{Key: "foo", Value: []byte("test")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("multi put 1 failed: %v", err)
|
||||
}
|
||||
e = &Entry{Key: "foo", Value: []byte("test")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("multi put 2 failed: %v", err)
|
||||
}
|
||||
|
||||
// Make a nested entry
|
||||
e = &Entry{Key: "foo/bar", Value: []byte("baz")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("nested put failed: %v", err)
|
||||
}
|
||||
|
||||
// Get should work
|
||||
out, err = b.Get(context.Background(), "foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("get failed: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(out, e) {
|
||||
t.Errorf("bad: %v expected: %v", out, e)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("list multi failed: %v", err)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
|
||||
t.Errorf("expected 2 keys [foo, foo/]: %v", keys)
|
||||
}
|
||||
|
||||
// Delete with children should work
|
||||
err = b.Delete(context.Background(), "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("delete after multi: %v", err)
|
||||
}
|
||||
|
||||
// Get should return the child
|
||||
out, err = b.Get(context.Background(), "foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("get after multi delete: %v", err)
|
||||
}
|
||||
if out == nil {
|
||||
t.Errorf("get after multi delete not nil: %v", out)
|
||||
}
|
||||
|
||||
// Removal of nested secret should not leave artifacts
|
||||
e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("deep nest: %v", err)
|
||||
}
|
||||
|
||||
err = b.Delete(context.Background(), "foo/nested1/nested2/nested3")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to remove deep nest: %v", err)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "foo/")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if len(keys) != 1 || keys[0] != "bar" {
|
||||
t.Errorf("should be exactly 1 key == bar: %v", keys)
|
||||
}
|
||||
|
||||
// Make a second nested entry to test prefix removal
|
||||
e = &Entry{Key: "foo/zip", Value: []byte("zap")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create second nested: %v", err)
|
||||
}
|
||||
|
||||
// Delete should not remove the prefix
|
||||
err = b.Delete(context.Background(), "foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to delete nested prefix: %v", err)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("list nested prefix: %v", err)
|
||||
}
|
||||
if len(keys) != 1 || keys[0] != "foo/" {
|
||||
t.Errorf("should be exactly 1 key == foo/: %v", keys)
|
||||
}
|
||||
|
||||
// Delete should remove the prefix
|
||||
err = b.Delete(context.Background(), "foo/zip")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to delete second prefix: %v", err)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("listing after second delete failed: %v", err)
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
t.Errorf("should be empty at end: %v", keys)
|
||||
}
|
||||
|
||||
// When the root path is empty, adding and removing deep nested values should not break listing
|
||||
e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("deep nest: %v", err)
|
||||
}
|
||||
|
||||
e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")}
|
||||
err = b.Put(context.Background(), e)
|
||||
if err != nil {
|
||||
t.Fatalf("deep nest: %v", err)
|
||||
}
|
||||
|
||||
err = b.Delete(context.Background(), "foo/nested1/nested2/value2")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to remove deep nest: %v", err)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("listing of root failed after deletion: %v", err)
|
||||
}
|
||||
if len(keys) == 0 {
|
||||
t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys)
|
||||
keys, err = b.List(context.Background(), "foo/nested1")
|
||||
if err != nil {
|
||||
t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err)
|
||||
}
|
||||
// prove that the root should not be empty and that foo/nested1 exists
|
||||
if len(keys) != 0 {
|
||||
t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup left over listing bug test value
|
||||
err = b.Delete(context.Background(), "foo/nested1/nested2/value1")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to remove deep nest: %v", err)
|
||||
}
|
||||
|
||||
keys, err = b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("listing of root failed after delete of deep nest: %v", err)
|
||||
}
|
||||
if len(keys) != 0 {
|
||||
t.Errorf("should be empty at end: %v", keys)
|
||||
}
|
||||
}
|
||||
|
||||
func ExerciseBackend_ListPrefix(t testing.TB, b Backend) {
|
||||
t.Helper()
|
||||
|
||||
e1 := &Entry{Key: "foo", Value: []byte("test")}
|
||||
e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
|
||||
e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
|
||||
|
||||
defer func() {
|
||||
b.Delete(context.Background(), "foo")
|
||||
b.Delete(context.Background(), "foo/bar")
|
||||
b.Delete(context.Background(), "foo/bar/baz")
|
||||
}()
|
||||
|
||||
err := b.Put(context.Background(), e1)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to put entry 1: %v", err)
|
||||
}
|
||||
err = b.Put(context.Background(), e2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to put entry 2: %v", err)
|
||||
}
|
||||
err = b.Put(context.Background(), e3)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to put entry 3: %v", err)
|
||||
}
|
||||
|
||||
// Scan the root
|
||||
keys, err := b.List(context.Background(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("list root: %v", err)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
|
||||
t.Errorf("root expected [foo foo/]: %v", keys)
|
||||
}
|
||||
|
||||
// Scan foo/
|
||||
keys, err = b.List(context.Background(), "foo/")
|
||||
if err != nil {
|
||||
t.Fatalf("list level 1: %v", err)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" {
|
||||
t.Errorf("level 1 expected [bar bar/]: %v", keys)
|
||||
}
|
||||
|
||||
// Scan foo/bar/
|
||||
keys, err = b.List(context.Background(), "foo/bar/")
|
||||
if err != nil {
|
||||
t.Fatalf("list level 2: %v", err)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if len(keys) != 1 || keys[0] != "baz" {
|
||||
t.Errorf("level 1 expected [baz]: %v", keys)
|
||||
}
|
||||
}
|
||||
|
||||
func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
	t.Helper()

	// Get the lock
	lock, err := b.LockWith("foo", "bar")
	if err != nil {
		t.Fatalf("initial lock: %v", err)
	}

	// Attempt to lock
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		t.Fatalf("lock attempt 1: %v", err)
	}
	if leaderCh == nil {
		t.Fatalf("missing leaderCh")
	}

	// Check the value
	held, val, err := lock.Value()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !held {
		t.Errorf("should be held")
	}
	if val != "bar" {
		t.Errorf("expected value bar: %v", val)
	}

	// Second acquisition should fail
	lock2, err := b2.LockWith("foo", "baz")
	if err != nil {
		t.Fatalf("lock 2: %v", err)
	}

	// Cancel attempt in 50 msec
	stopCh := make(chan struct{})
	time.AfterFunc(50*time.Millisecond, func() {
		close(stopCh)
	})

	// Attempt to lock
	leaderCh2, err := lock2.Lock(stopCh)
	if err != nil {
		t.Fatalf("stop lock 2: %v", err)
	}
	if leaderCh2 != nil {
		t.Errorf("should not have gotten leaderCh: %v", leaderCh2)
	}

	// Release the first lock
	lock.Unlock()

	// Attempt to lock should work
	leaderCh2, err = lock2.Lock(nil)
	if err != nil {
		t.Fatalf("lock 2 lock: %v", err)
	}
	if leaderCh2 == nil {
		t.Errorf("should get leaderCh")
	}

	// Check the value
	held, val, err = lock2.Value()
	if err != nil {
		t.Fatalf("value: %v", err)
	}
	if !held {
		t.Errorf("should still be held")
	}
	if val != "baz" {
		t.Errorf("expected: baz, got: %v", val)
	}

	// Cleanup
	lock2.Unlock()
}

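Continuing the sketch above (same assumed imports): ExerciseHABackend needs two HABackend handles contending for one lock. For the in-memory backend the lock state lives inside the instance, so the sketch passes the same instance twice; a networked backend would construct two independent clients instead.

func TestInmemHA_ExerciseHABackend(t *testing.T) {
	b, err := inmem.NewInmemHA(nil, log.NewNullLogger())
	if err != nil {
		t.Fatal(err)
	}

	ha, ok := b.(physical.HABackend)
	if !ok {
		t.Fatal("backend does not implement HABackend")
	}

	// Both "contenders" share one lock table, which is what the helper needs
	// in order to observe the blocked second acquisition.
	physical.ExerciseHABackend(t, ha, ha)
}
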
func ExerciseTransactionalBackend(t testing.TB, b Backend) {
	t.Helper()
	tb, ok := b.(Transactional)
	if !ok {
		t.Fatal("Not a transactional backend")
	}

	txns := SetupTestingTransactions(t, b)

	if err := tb.Transaction(context.Background(), txns); err != nil {
		t.Fatal(err)
	}

	keys, err := b.List(context.Background(), "")
	if err != nil {
		t.Fatal(err)
	}

	expected := []string{"foo", "zip"}

	sort.Strings(keys)
	sort.Strings(expected)
	if !reflect.DeepEqual(keys, expected) {
		t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
	}

	entry, err := b.Get(context.Background(), "foo")
	if err != nil {
		t.Fatal(err)
	}
	if entry == nil {
		t.Fatal("got nil entry")
	}
	if entry.Value == nil {
		t.Fatal("got nil value")
	}
	if string(entry.Value) != "bar3" {
		t.Fatal("updates did not apply correctly")
	}

	entry, err = b.Get(context.Background(), "zip")
	if err != nil {
		t.Fatal(err)
	}
	if entry == nil {
		t.Fatal("got nil entry")
	}
	if entry.Value == nil {
		t.Fatal("got nil value")
	}
	if string(entry.Value) != "zap3" {
		t.Fatal("updates did not apply correctly")
	}
}

func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry {
	t.Helper()
	// Add a few keys so that we test rollback with deletion
	if err := b.Put(context.Background(), &Entry{
		Key:   "foo",
		Value: []byte("bar"),
	}); err != nil {
		t.Fatal(err)
	}
	if err := b.Put(context.Background(), &Entry{
		Key:   "zip",
		Value: []byte("zap"),
	}); err != nil {
		t.Fatal(err)
	}
	if err := b.Put(context.Background(), &Entry{
		Key: "deleteme",
	}); err != nil {
		t.Fatal(err)
	}
	if err := b.Put(context.Background(), &Entry{
		Key: "deleteme2",
	}); err != nil {
		t.Fatal(err)
	}

	txns := []*TxnEntry{
		{
			Operation: PutOperation,
			Entry: &Entry{
				Key:   "foo",
				Value: []byte("bar2"),
			},
		},
		{
			Operation: DeleteOperation,
			Entry: &Entry{
				Key: "deleteme",
			},
		},
		{
			Operation: PutOperation,
			Entry: &Entry{
				Key:   "foo",
				Value: []byte("bar3"),
			},
		},
		{
			Operation: DeleteOperation,
			Entry: &Entry{
				Key: "deleteme2",
			},
		},
		{
			Operation: PutOperation,
			Entry: &Entry{
				Key:   "zip",
				Value: []byte("zap3"),
			},
		},
	}

	return txns
}
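The transactional contract can be exercised the same way, assuming the sdk's inmem.NewTransactionalInmem constructor (same assumed imports as the sketches above):

func TestInmem_ExerciseTransactionalBackend(t *testing.T) {
	b, err := inmem.NewTransactionalInmem(nil, log.NewNullLogger())
	if err != nil {
		t.Fatal(err)
	}

	// Applies the batch built by SetupTestingTransactions and checks the
	// resulting keys and values.
	physical.ExerciseTransactionalBackend(t, b)
}
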
131
vendor/github.com/hashicorp/vault/sdk/physical/transactions.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
package physical

import (
	"context"

	multierror "github.com/hashicorp/go-multierror"
)

// TxnEntry is an operation that is applied atomically as part of
// a transactional update. Only supported by Transactional backends.
type TxnEntry struct {
	Operation Operation
	Entry     *Entry
}

// Transactional is an optional interface for backends that
// support doing transactional updates of multiple keys. This is
// required for some features such as replication.
type Transactional interface {
	// The function to run a transaction
	Transaction(context.Context, []*TxnEntry) error
}

type TransactionalBackend interface {
	Backend
	Transactional
}

type PseudoTransactional interface {
	// An internal function should do no locking or permit pool acquisition.
	// Depending on the backend and if it natively supports transactions, these
	// may simply chain to the normal backend functions.
	GetInternal(context.Context, string) (*Entry, error)
	PutInternal(context.Context, *Entry) error
	DeleteInternal(context.Context, string) error
}

// GenericTransactionHandler implements the Transactional interface on top of
// a PseudoTransactional backend, rolling back applied operations on failure.
func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) {
	rollbackStack := make([]*TxnEntry, 0, len(txns))
	var dirty bool

	// We walk the transactions in order; each successful operation goes into a
	// LIFO for rollback if we hit an error along the way
TxnWalk:
	for _, txn := range txns {
		switch txn.Operation {
		case DeleteOperation:
			entry, err := t.GetInternal(ctx, txn.Entry.Key)
			if err != nil {
				retErr = multierror.Append(retErr, err)
				dirty = true
				break TxnWalk
			}
			if entry == nil {
				// Nothing to delete or roll back
				continue
			}
			rollbackEntry := &TxnEntry{
				Operation: PutOperation,
				Entry: &Entry{
					Key:   entry.Key,
					Value: entry.Value,
				},
			}
			err = t.DeleteInternal(ctx, txn.Entry.Key)
			if err != nil {
				retErr = multierror.Append(retErr, err)
				dirty = true
				break TxnWalk
			}
			rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)

		case PutOperation:
			entry, err := t.GetInternal(ctx, txn.Entry.Key)
			if err != nil {
				retErr = multierror.Append(retErr, err)
				dirty = true
				break TxnWalk
			}
			// Nothing existed so in fact rolling back requires a delete
			var rollbackEntry *TxnEntry
			if entry == nil {
				rollbackEntry = &TxnEntry{
					Operation: DeleteOperation,
					Entry: &Entry{
						Key: txn.Entry.Key,
					},
				}
			} else {
				rollbackEntry = &TxnEntry{
					Operation: PutOperation,
					Entry: &Entry{
						Key:   entry.Key,
						Value: entry.Value,
					},
				}
			}

			err = t.PutInternal(ctx, txn.Entry)
			if err != nil {
				retErr = multierror.Append(retErr, err)
				dirty = true
				break TxnWalk
			}
			rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
		}
	}

	// Need to roll back because we hit an error along the way
	if dirty {
		// While traversing this, if we get an error, we continue anyways in
		// best-effort fashion
		for _, txn := range rollbackStack {
			switch txn.Operation {
			case DeleteOperation:
				err := t.DeleteInternal(ctx, txn.Entry.Key)
				if err != nil {
					retErr = multierror.Append(retErr, err)
				}
			case PutOperation:
				err := t.PutInternal(ctx, txn.Entry)
				if err != nil {
					retErr = multierror.Append(retErr, err)
				}
			}
		}
	}

	return
}
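To illustrate how these pieces are meant to fit together, here is a sketch of a hypothetical map-backed backend (mapkv, not part of this diff) that implements PseudoTransactional and lets GenericTransactionHandler supply Transaction, including the best-effort rollback; the rest of the Backend interface (Get, Put, Delete, List) is elided.

package mapkv

import (
	"context"
	"sync"

	"github.com/hashicorp/vault/sdk/physical"
)

// MapBackend is a hypothetical in-memory store used only to illustrate the
// PseudoTransactional pattern.
type MapBackend struct {
	mu   sync.Mutex
	data map[string][]byte
}

func NewMapBackend() *MapBackend {
	return &MapBackend{data: make(map[string][]byte)}
}

func (m *MapBackend) GetInternal(_ context.Context, key string) (*physical.Entry, error) {
	v, ok := m.data[key]
	if !ok {
		return nil, nil
	}
	return &physical.Entry{Key: key, Value: v}, nil
}

func (m *MapBackend) PutInternal(_ context.Context, e *physical.Entry) error {
	m.data[e.Key] = e.Value
	return nil
}

func (m *MapBackend) DeleteInternal(_ context.Context, key string) error {
	delete(m.data, key)
	return nil
}

// Transaction holds the lock for the whole batch and delegates apply/rollback
// bookkeeping to GenericTransactionHandler.
func (m *MapBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return physical.GenericTransactionHandler(ctx, m, txns)
}
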
7
vendor/github.com/hashicorp/vault/sdk/version/cgo.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
// +build cgo

package version

func init() {
	CgoEnabled = true
}
74
vendor/github.com/hashicorp/vault/sdk/version/version.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
package version

import (
	"bytes"
	"fmt"
)

// VersionInfo holds the version, prerelease, metadata, and revision details
// for a build.
type VersionInfo struct {
	Revision          string
	Version           string
	VersionPrerelease string
	VersionMetadata   string
}

func GetVersion() *VersionInfo {
	ver := Version
	rel := VersionPrerelease
	md := VersionMetadata
	if GitDescribe != "" {
		ver = GitDescribe
	}
	if GitDescribe == "" && rel == "" && VersionPrerelease != "" {
		rel = "dev"
	}

	return &VersionInfo{
		Revision:          GitCommit,
		Version:           ver,
		VersionPrerelease: rel,
		VersionMetadata:   md,
	}
}

func (c *VersionInfo) VersionNumber() string {
	if Version == "unknown" && VersionPrerelease == "unknown" {
		return "(version unknown)"
	}

	version := c.Version

	if c.VersionPrerelease != "" {
		version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)
	}

	if c.VersionMetadata != "" {
		version = fmt.Sprintf("%s+%s", version, c.VersionMetadata)
	}

	return version
}

func (c *VersionInfo) FullVersionNumber(rev bool) string {
	var versionString bytes.Buffer

	if Version == "unknown" && VersionPrerelease == "unknown" {
		return "Vault (version unknown)"
	}

	fmt.Fprintf(&versionString, "Vault v%s", c.Version)
	if c.VersionPrerelease != "" {
		fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
	}

	if c.VersionMetadata != "" {
		fmt.Fprintf(&versionString, "+%s", c.VersionMetadata)
	}

	if rev && c.Revision != "" {
		fmt.Fprintf(&versionString, " (%s)", c.Revision)
	}

	return versionString.String()
}
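A short usage sketch for the version helpers: with the defaults declared in version_base.go below, GetVersion plus FullVersionNumber yields something like "Vault v1.8.0-dev". GitCommit and GitDescribe are presumably injected at link time (for example via -ldflags "-X github.com/hashicorp/vault/sdk/version.GitCommit=<sha>"), which is what the "filled in by the compiler" comment in version_base.go refers to.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/version"
)

func main() {
	info := version.GetVersion()

	// With the stock values this prints something like "Vault v1.8.0-dev";
	// the revision suffix only appears when GitCommit was set at build time.
	fmt.Println(info.FullVersionNumber(true))
}
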
14
vendor/github.com/hashicorp/vault/sdk/version/version_base.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
package version

var (
	// The git commit that was compiled. This will be filled in by the compiler.
	GitCommit   string
	GitDescribe string

	// Whether cgo is enabled or not; set at build time
	CgoEnabled bool

	Version           = "1.8.0"
	VersionPrerelease = "dev"
	VersionMetadata   = ""
)