rebase: bump github.com/hashicorp/vault/api from 1.3.1 to 1.4.1

Bumps [github.com/hashicorp/vault/api](https://github.com/hashicorp/vault) from 1.3.1 to 1.4.1.
- [Release notes](https://github.com/hashicorp/vault/releases)
- [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/vault/compare/v1.3.1...v1.4.1)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/vault/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] 2022-02-28 20:10:36 +00:00 committed by mergify[bot]
parent 9a9c69cba2
commit b1af5f63b5
40 changed files with 1517 additions and 205 deletions

go.mod (4 changes)

@ -13,7 +13,7 @@ require (
github.com/golang/protobuf v1.5.2 github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.3.1 github.com/hashicorp/vault/api v1.4.1
github.com/kubernetes-csi/csi-lib-utils v0.10.0 github.com/kubernetes-csi/csi-lib-utils v0.10.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
@ -83,7 +83,7 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/vault v1.4.2 // indirect github.com/hashicorp/vault v1.4.2 // indirect
github.com/hashicorp/vault/sdk v0.3.0 // indirect github.com/hashicorp/vault/sdk v0.4.1 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
github.com/imdario/mergo v0.3.12 // indirect github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect

go.sum (8 changes)

@ -635,8 +635,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
github.com/hashicorp/vault/api v1.3.1 h1:pkDkcgTh47PRjY1NEFeofqR4W/HkNUi9qIakESO2aRM= github.com/hashicorp/vault/api v1.4.1 h1:mWLfPT0RhxBitjKr6swieCEP2v5pp/M//t70S3kMLRo=
github.com/hashicorp/vault/api v1.3.1/go.mod h1:QeJoWxMFt+MsuWcYhmwRLwKEXrjwAFFywzhptMsTIUw= github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM=
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@ -646,8 +646,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.3.0 h1:kR3dpxNkhh/wr6ycaJYqp6AFT/i2xaftbfnwZduTKEY= github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo=
github.com/hashicorp/vault/sdk v0.3.0/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=


@ -51,6 +51,8 @@ const (
EnvRateLimit = "VAULT_RATE_LIMIT" EnvRateLimit = "VAULT_RATE_LIMIT"
EnvHTTPProxy = "VAULT_HTTP_PROXY" EnvHTTPProxy = "VAULT_HTTP_PROXY"
HeaderIndex = "X-Vault-Index" HeaderIndex = "X-Vault-Index"
HeaderForward = "X-Vault-Forward"
HeaderInconsistent = "X-Vault-Inconsistent"
) )
// Deprecated values // Deprecated values
@ -132,6 +134,12 @@ type Config struct {
// with the same client. Cloning a client will not clone this value. // with the same client. Cloning a client will not clone this value.
OutputCurlString bool OutputCurlString bool
// curlCACert, curlCAPath, curlClientCert and curlClientKey are used to keep
// track of the name of the TLS certs and keys when OutputCurlString is set.
// Cloning a client will also not clone those values.
curlCACert, curlCAPath string
curlClientCert, curlClientKey string
// SRVLookup enables the client to lookup the host through DNS SRV lookup // SRVLookup enables the client to lookup the host through DNS SRV lookup
SRVLookup bool SRVLookup bool
@ -139,6 +147,9 @@ type Config struct {
// its clone. // its clone.
CloneHeaders bool CloneHeaders bool
// CloneToken from parent.
CloneToken bool
// ReadYourWrites ensures isolated read-after-write semantics by // ReadYourWrites ensures isolated read-after-write semantics by
// providing discovered cluster replication states in each request. // providing discovered cluster replication states in each request.
// The shared state is automatically propagated to all Client clones. // The shared state is automatically propagated to all Client clones.
@ -180,7 +191,7 @@ type TLSConfig struct {
// The default Address is https://127.0.0.1:8200, but this can be overridden by // The default Address is https://127.0.0.1:8200, but this can be overridden by
// setting the `VAULT_ADDR` environment variable. // setting the `VAULT_ADDR` environment variable.
// //
// If an error is encountered, this will return nil. // If an error is encountered, the Error field on the returned *Config will be populated with the specific error.
func DefaultConfig() *Config { func DefaultConfig() *Config {
config := &Config{ config := &Config{
Address: "https://127.0.0.1:8200", Address: "https://127.0.0.1:8200",
@ -222,9 +233,9 @@ func DefaultConfig() *Config {
return config return config
} }
// ConfigureTLS takes a set of TLS configurations and applies those to the // configureTLS is a lock-free version of ConfigureTLS that can be used in
// HTTP client. // ReadEnvironment where the lock is already held
func (c *Config) ConfigureTLS(t *TLSConfig) error { func (c *Config) configureTLS(t *TLSConfig) error {
if c.HttpClient == nil { if c.HttpClient == nil {
c.HttpClient = DefaultConfig().HttpClient c.HttpClient = DefaultConfig().HttpClient
} }
@ -241,11 +252,15 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error {
return err return err
} }
foundClientCert = true foundClientCert = true
c.curlClientCert = t.ClientCert
c.curlClientKey = t.ClientKey
case t.ClientCert != "" || t.ClientKey != "": case t.ClientCert != "" || t.ClientKey != "":
return fmt.Errorf("both client cert and client key must be provided") return fmt.Errorf("both client cert and client key must be provided")
} }
if t.CACert != "" || t.CAPath != "" { if t.CACert != "" || t.CAPath != "" {
c.curlCACert = t.CACert
c.curlCAPath = t.CAPath
rootConfig := &rootcerts.Config{ rootConfig := &rootcerts.Config{
CAFile: t.CACert, CAFile: t.CACert,
CAPath: t.CAPath, CAPath: t.CAPath,
@ -275,6 +290,15 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error {
return nil return nil
} }
// ConfigureTLS takes a set of TLS configurations and applies those to the
// HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
return c.configureTLS(t)
}
// ReadEnvironment reads configuration information from the environment. If // ReadEnvironment reads configuration information from the environment. If
// there is an error, no configuration value is updated. // there is an error, no configuration value is updated.
func (c *Config) ReadEnvironment() error { func (c *Config) ReadEnvironment() error {
@ -379,7 +403,7 @@ func (c *Config) ReadEnvironment() error {
c.SRVLookup = envSRVLookup c.SRVLookup = envSRVLookup
c.Limiter = limit c.Limiter = limit
if err := c.ConfigureTLS(t); err != nil { if err := c.configureTLS(t); err != nil {
return err return err
} }
@ -547,6 +571,7 @@ func (c *Client) CloneConfig() *Config {
newConfig.OutputCurlString = c.config.OutputCurlString newConfig.OutputCurlString = c.config.OutputCurlString
newConfig.SRVLookup = c.config.SRVLookup newConfig.SRVLookup = c.config.SRVLookup
newConfig.CloneHeaders = c.config.CloneHeaders newConfig.CloneHeaders = c.config.CloneHeaders
newConfig.CloneToken = c.config.CloneToken
newConfig.ReadYourWrites = c.config.ReadYourWrites newConfig.ReadYourWrites = c.config.ReadYourWrites
// we specifically want a _copy_ of the client here, not a pointer to the original one // we specifically want a _copy_ of the client here, not a pointer to the original one
@ -775,6 +800,12 @@ func (c *Client) setNamespace(namespace string) {
c.headers.Set(consts.NamespaceHeaderName, namespace) c.headers.Set(consts.NamespaceHeaderName, namespace)
} }
func (c *Client) ClearNamespace() {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
c.headers.Del(consts.NamespaceHeaderName)
}
// Token returns the access token being used by this client. It will // Token returns the access token being used by this client. It will
// return the empty string if there is no token set. // return the empty string if there is no token set.
func (c *Client) Token() string { func (c *Client) Token() string {
@ -873,6 +904,26 @@ func (c *Client) CloneHeaders() bool {
return c.config.CloneHeaders return c.config.CloneHeaders
} }
// SetCloneToken from parent
func (c *Client) SetCloneToken(cloneToken bool) {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
c.config.CloneToken = cloneToken
}
// CloneToken gets the configured CloneToken value.
func (c *Client) CloneToken() bool {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
c.config.modifyLock.RLock()
defer c.config.modifyLock.RUnlock()
return c.config.CloneToken
}
// SetReadYourWrites to prevent reading stale cluster replication state. // SetReadYourWrites to prevent reading stale cluster replication state.
func (c *Client) SetReadYourWrites(preventStaleReads bool) { func (c *Client) SetReadYourWrites(preventStaleReads bool) {
c.modifyLock.Lock() c.modifyLock.Lock()
@ -904,12 +955,25 @@ func (c *Client) ReadYourWrites() bool {
// Clone creates a new client with the same configuration. Note that the same // Clone creates a new client with the same configuration. Note that the same
// underlying http.Client is used; modifying the client from more than one // underlying http.Client is used; modifying the client from more than one
// goroutine at once may not be safe, so modify the client as needed and then // goroutine at once may not be safe, so modify the client as needed and then
// clone. // clone. The headers are cloned based on the CloneHeaders property of the
// source config
// //
// Also, only the client's config is currently copied; this means items not in // Also, only the client's config is currently copied; this means items not in
// the api.Config struct, such as policy override and wrapping function // the api.Config struct, such as policy override and wrapping function
// behavior, must currently then be set as desired on the new client. // behavior, must currently then be set as desired on the new client.
func (c *Client) Clone() (*Client, error) { func (c *Client) Clone() (*Client, error) {
return c.clone(c.config.CloneHeaders)
}
// CloneWithHeaders creates a new client similar to Clone, with the difference
// being that the headers are always cloned
func (c *Client) CloneWithHeaders() (*Client, error) {
return c.clone(true)
}
// clone creates a new client, with the headers being cloned based on the
// passed in cloneheaders boolean
func (c *Client) clone(cloneHeaders bool) (*Client, error) {
c.modifyLock.RLock() c.modifyLock.RLock()
defer c.modifyLock.RUnlock() defer c.modifyLock.RUnlock()
@ -932,6 +996,7 @@ func (c *Client) Clone() (*Client, error) {
AgentAddress: config.AgentAddress, AgentAddress: config.AgentAddress,
SRVLookup: config.SRVLookup, SRVLookup: config.SRVLookup,
CloneHeaders: config.CloneHeaders, CloneHeaders: config.CloneHeaders,
CloneToken: config.CloneToken,
ReadYourWrites: config.ReadYourWrites, ReadYourWrites: config.ReadYourWrites,
} }
client, err := NewClient(newConfig) client, err := NewClient(newConfig)
@ -939,10 +1004,14 @@ func (c *Client) Clone() (*Client, error) {
return nil, err return nil, err
} }
if config.CloneHeaders { if cloneHeaders {
client.SetHeaders(c.Headers().Clone()) client.SetHeaders(c.Headers().Clone())
} }
if config.CloneToken {
client.SetToken(c.token)
}
client.replicationStateStore = c.replicationStateStore client.replicationStateStore = c.replicationStateStore
return client, nil return client, nil
@ -1080,6 +1149,10 @@ START:
LastOutputStringError = &OutputStringError{ LastOutputStringError = &OutputStringError{
Request: req, Request: req,
TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify,
ClientCert: c.config.curlClientCert,
ClientKey: c.config.curlClientKey,
ClientCACert: c.config.curlCACert,
ClientCAPath: c.config.curlCAPath,
} }
return nil, LastOutputStringError return nil, LastOutputStringError
} }
@ -1330,7 +1403,7 @@ func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error
// conjunction with RequireState. // conjunction with RequireState.
func ForwardInconsistent() RequestCallback { func ForwardInconsistent() RequestCallback {
return func(req *Request) { return func(req *Request) {
req.Headers.Set("X-Vault-Inconsistent", "forward-active-node") req.Headers.Set(HeaderInconsistent, "forward-active-node")
} }
} }
@ -1339,7 +1412,7 @@ func ForwardInconsistent() RequestCallback {
// This feature must be enabled in Vault's configuration. // This feature must be enabled in Vault's configuration.
func ForwardAlways() RequestCallback { func ForwardAlways() RequestCallback {
return func(req *Request) { return func(req *Request) {
req.Headers.Set("X-Vault-Forward", "active-node") req.Headers.Set(HeaderForward, "active-node")
} }
} }
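A minimal usage sketch of the client-cloning additions above (SetCloneToken, CloneToken and CloneWithHeaders), assuming VAULT_ADDR points at a reachable server; the token value is a placeholder.

package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig picks up VAULT_ADDR and related settings from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("hvs.placeholder") // placeholder token
	client.SetCloneToken(true)         // new: clones inherit the parent token

	// CloneWithHeaders always copies headers; Clone only does so when
	// CloneHeaders is set on the source config.
	clone, err := client.CloneWithHeaders()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(clone.Token() == client.Token()) // true, because CloneToken is enabled
}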


@ -225,7 +225,7 @@ func (r *LifetimeWatcher) Start() {
r.doneCh <- r.doRenew() r.doneCh <- r.doRenew()
} }
// Renew is for comnpatibility with the legacy api.Renewer. Calling Renew // Renew is for compatibility with the legacy api.Renewer. Calling Renew
// simply chains to Start. // simply chains to Start.
func (r *LifetimeWatcher) Renew() { func (r *LifetimeWatcher) Renew() {
r.Start() r.Start()


@ -5,7 +5,6 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net/http"
"net/url" "net/url"
"os" "os"
"strings" "strings"
@ -145,9 +144,7 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) {
r := c.c.NewRequest("PATCH", "/v1/"+path) r := c.c.NewRequest("PATCH", "/v1/"+path)
r.Headers = http.Header{ r.Headers.Set("Content-Type", "application/merge-patch+json")
"Content-Type": []string{"application/merge-patch+json"},
}
if err := r.SetJSONBody(data); err != nil { if err := r.SetJSONBody(data); err != nil {
return nil, err return nil, err
} }
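Illustrative sketch of the reworked JSONMergePatch call, which now sets the merge-patch content type on the request's existing headers; the mount path and payload are placeholders and assume a KV v2 engine. Imports assumed: context, plus vault "github.com/hashicorp/vault/api".

// patchLogLevel merges a single key into an existing KV v2 secret without
// rewriting the other fields.
func patchLogLevel(client *vault.Client) error {
	_, err := client.Logical().JSONMergePatch(context.Background(), "secret/data/app-config",
		map[string]interface{}{
			"data": map[string]interface{}{
				"log_level": "debug", // only this key is merged server-side
			},
		})
	return err
}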


@ -15,9 +15,11 @@ var LastOutputStringError *OutputStringError
type OutputStringError struct { type OutputStringError struct {
*retryablehttp.Request *retryablehttp.Request
TLSSkipVerify bool TLSSkipVerify bool
parsingError error ClientCACert, ClientCAPath string
parsedCurlString string ClientCert, ClientKey string
parsingError error
parsedCurlString string
} }
func (d *OutputStringError) Error() string { func (d *OutputStringError) Error() string {
@ -46,6 +48,22 @@ func (d *OutputStringError) parseRequest() {
if d.Request.Method != "GET" { if d.Request.Method != "GET" {
d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method) d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method)
} }
if d.ClientCACert != "" {
clientCACert := strings.Replace(d.ClientCACert, "'", "'\"'\"'", -1)
d.parsedCurlString = fmt.Sprintf("%s--cacert '%s' ", d.parsedCurlString, clientCACert)
}
if d.ClientCAPath != "" {
clientCAPath := strings.Replace(d.ClientCAPath, "'", "'\"'\"'", -1)
d.parsedCurlString = fmt.Sprintf("%s--capath '%s' ", d.parsedCurlString, clientCAPath)
}
if d.ClientCert != "" {
clientCert := strings.Replace(d.ClientCert, "'", "'\"'\"'", -1)
d.parsedCurlString = fmt.Sprintf("%s--cert '%s' ", d.parsedCurlString, clientCert)
}
if d.ClientKey != "" {
clientKey := strings.Replace(d.ClientKey, "'", "'\"'\"'", -1)
d.parsedCurlString = fmt.Sprintf("%s--key '%s' ", d.parsedCurlString, clientKey)
}
for k, v := range d.Request.Header { for k, v := range d.Request.Header {
for _, h := range v { for _, h := range v {
if strings.ToLower(k) == "x-vault-token" { if strings.ToLower(k) == "x-vault-token" {
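A sketch of how the new curl-output fields surface to callers: with OutputCurlString enabled, requests are rendered as a curl command, now including --cacert/--capath/--cert/--key for the TLS material configured via ConfigureTLS, instead of being sent. File paths are placeholders; imports assumed: errors, fmt, log, and vault "github.com/hashicorp/vault/api".

func printCurlInsteadOfSending() {
	cfg := vault.DefaultConfig()
	cfg.OutputCurlString = true // render requests as curl commands, do not send them
	err := cfg.ConfigureTLS(&vault.TLSConfig{
		CACert:     "/etc/vault/ca.pem", // placeholder paths
		ClientCert: "/etc/vault/client.pem",
		ClientKey:  "/etc/vault/client.key",
	})
	if err != nil {
		log.Fatal(err)
	}
	client, err := vault.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_, err = client.Logical().Read("secret/data/app-config")
	// The error carries the configured cert paths, so the generated curl
	// command includes the matching --cacert, --cert and --key flags.
	var curlErr *vault.OutputStringError
	if errors.As(err, &curlErr) {
		fmt.Println("request was rendered as a curl command instead of being sent")
	}
}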


@ -182,7 +182,6 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error)
Certificates: []tls.Certificate{cert}, Certificates: []tls.Certificate{cert},
ServerName: serverCert.Subject.CommonName, ServerName: serverCert.Subject.CommonName,
} }
tlsConfig.BuildNameToCertificate()
return tlsConfig, nil return tlsConfig, nil
} }


@ -9,6 +9,7 @@ import (
"github.com/hashicorp/errwrap" "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/logical"
) )
// Secret is the structure returned for every secret within Vault. // Secret is the structure returned for every secret within Vault.
@ -297,6 +298,8 @@ type SecretAuth struct {
LeaseDuration int `json:"lease_duration"` LeaseDuration int `json:"lease_duration"`
Renewable bool `json:"renewable"` Renewable bool `json:"renewable"`
MFARequirement *logical.MFARequirement `json:"mfa_requirement"`
} }
// ParseSecret is used to parse a secret value from JSON from an io.Reader. // ParseSecret is used to parse a secret value from JSON from an io.Reader.


@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"time"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
) )
@ -65,7 +66,31 @@ func (c *Sys) Unmount(path string) error {
return err return err
} }
// Remount kicks off a remount operation and polls the status endpoint using
// the migration ID until either a success or failure state is observed.
func (c *Sys) Remount(from, to string) error { func (c *Sys) Remount(from, to string) error {
remountResp, err := c.StartRemount(from, to)
if err != nil {
return err
}
for {
remountStatusResp, err := c.RemountStatus(remountResp.MigrationID)
if err != nil {
return err
}
if remountStatusResp.MigrationInfo.MigrationStatus == "success" {
return nil
}
if remountStatusResp.MigrationInfo.MigrationStatus == "failure" {
return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID)
}
time.Sleep(1 * time.Second)
}
}
// StartRemount kicks off a mount migration and returns a response with the migration ID
func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) {
body := map[string]interface{}{ body := map[string]interface{}{
"from": from, "from": from,
"to": to, "to": to,
@ -73,16 +98,59 @@ func (c *Sys) Remount(from, to string) error {
r := c.c.NewRequest("POST", "/v1/sys/remount") r := c.c.NewRequest("POST", "/v1/sys/remount")
if err := r.SetJSONBody(body); err != nil { if err := r.SetJSONBody(body); err != nil {
return err return nil, err
} }
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc() defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r) resp, err := c.c.RawRequestWithContext(ctx, r)
if err == nil { if err != nil {
defer resp.Body.Close() return nil, err
} }
return err defer resp.Body.Close()
secret, err := ParseSecret(resp.Body)
if err != nil {
return nil, err
}
if secret == nil || secret.Data == nil {
return nil, errors.New("data from server response is empty")
}
var result MountMigrationOutput
err = mapstructure.Decode(secret.Data, &result)
if err != nil {
return nil, err
}
return &result, err
}
// RemountStatus checks the status of a mount migration operation with the provided ID
func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) {
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/remount/status/%s", migrationID))
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
resp, err := c.c.RawRequestWithContext(ctx, r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
secret, err := ParseSecret(resp.Body)
if err != nil {
return nil, err
}
if secret == nil || secret.Data == nil {
return nil, errors.New("data from server response is empty")
}
var result MountMigrationStatusOutput
err = mapstructure.Decode(secret.Data, &result)
if err != nil {
return nil, err
}
return &result, err
} }
func (c *Sys) TuneMount(path string, config MountConfigInput) error { func (c *Sys) TuneMount(path string, config MountConfigInput) error {
@ -187,3 +255,18 @@ type MountConfigOutput struct {
// Deprecated: This field will always be blank for newer server responses. // Deprecated: This field will always be blank for newer server responses.
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
} }
type MountMigrationOutput struct {
MigrationID string `mapstructure:"migration_id"`
}
type MountMigrationStatusOutput struct {
MigrationID string `mapstructure:"migration_id"`
MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"`
}
type MountMigrationStatusInfo struct {
SourceMount string `mapstructure:"source_mount"`
TargetMount string `mapstructure:"target_mount"`
MigrationStatus string `mapstructure:"status"`
}
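Illustrative sketch of the new mount-migration helpers; the mount paths are placeholders, and the snippet assumes the fmt import plus vault "github.com/hashicorp/vault/api" and a configured client.

func moveMount(client *vault.Client) error {
	// The blocking helper does all of this in one call:
	//   return client.Sys().Remount("kv-old/", "kv-new/")
	// Below is the equivalent non-blocking flow using the new helpers.
	out, err := client.Sys().StartRemount("kv-old/", "kv-new/")
	if err != nil {
		return err
	}
	status, err := client.Sys().RemountStatus(out.MigrationID)
	if err != nil {
		return err
	}
	fmt.Printf("migration %s: %s -> %s is %q\n", out.MigrationID,
		status.MigrationInfo.SourceMount,
		status.MigrationInfo.TargetMount,
		status.MigrationInfo.MigrationStatus)
	return nil
}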


@ -33,6 +33,22 @@ import (
cbasn1 "golang.org/x/crypto/cryptobyte/asn1" cbasn1 "golang.org/x/crypto/cryptobyte/asn1"
) )
const rsaMinimumSecureKeySize = 2048
// Mapping of key types to default key lengths
var defaultAlgorithmKeyBits = map[string]int{
"rsa": 2048,
"ec": 256,
}
// Mapping of NIST P-Curve's key length to expected signature bits.
var expectedNISTPCurveHashBits = map[int]int{
224: 256,
256: 256,
384: 384,
521: 512,
}
// GetHexFormatted returns the byte buffer formatted in hex with // GetHexFormatted returns the byte buffer formatted in hex with
// the specified separator between bytes. // the specified separator between bytes.
func GetHexFormatted(buf []byte, sep string) string { func GetHexFormatted(buf []byte, sep string) string {
@ -61,21 +77,42 @@ func ParseHexFormatted(in, sep string) []byte {
return ret.Bytes() return ret.Bytes()
} }
// GetSubjKeyID returns the subject key ID, e.g. the SHA1 sum // GetSubjKeyID returns the subject key ID. The computed ID is the SHA-1 hash of
// of the marshaled public key // the marshaled public key according to
// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 (1)
func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) {
if privateKey == nil { if privateKey == nil {
return nil, errutil.InternalError{Err: "passed-in private key is nil"} return nil, errutil.InternalError{Err: "passed-in private key is nil"}
} }
return getSubjectKeyID(privateKey.Public())
}
marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public()) func getSubjectKeyID(pub interface{}) ([]byte, error) {
if err != nil { var publicKeyBytes []byte
return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} switch pub := pub.(type) {
case *rsa.PublicKey:
type pkcs1PublicKey struct {
N *big.Int
E int
}
var err error
publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
N: pub.N,
E: pub.E,
})
if err != nil {
return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
}
case *ecdsa.PublicKey:
publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
case ed25519.PublicKey:
publicKeyBytes = pub
default:
return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported public key type: %T", pub)}
} }
skid := sha1.Sum(publicKeyBytes)
subjKeyID := sha1.Sum(marshaledKey) return skid[:], nil
return subjKeyID[:], nil
} }
// ParsePKIMap takes a map (for instance, the Secret.Data // ParsePKIMap takes a map (for instance, the Secret.Data
@ -354,6 +391,9 @@ func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) {
func ParsePublicKeyPEM(data []byte) (interface{}, error) { func ParsePublicKeyPEM(data []byte) (interface{}, error) {
block, data := pem.Decode(data) block, data := pem.Decode(data)
if block != nil { if block != nil {
if len(bytes.TrimSpace(data)) > 0 {
return nil, errutil.UserError{Err: "unexpected trailing data after parsed PEM block"}
}
var rawKey interface{} var rawKey interface{}
var err error var err error
if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
@ -364,17 +404,15 @@ func ParsePublicKeyPEM(data []byte) (interface{}, error) {
} }
} }
if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok { switch key := rawKey.(type) {
return rsaPublicKey, nil case *rsa.PublicKey:
} return key, nil
if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok { case *ecdsa.PublicKey:
return ecPublicKey, nil return key, nil
} case ed25519.PublicKey:
if edPublicKey, ok := rawKey.(ed25519.PublicKey); ok { return key, nil
return edPublicKey, nil
} }
} }
return nil, errors.New("data does not contain any valid public keys") return nil, errors.New("data does not contain any valid public keys")
} }
@ -525,20 +563,118 @@ func StringToOid(in string) (asn1.ObjectIdentifier, error) {
return asn1.ObjectIdentifier(ret), nil return asn1.ObjectIdentifier(ret), nil
} }
func ValidateSignatureLength(keyBits int) error { // Returns default key bits for the specified key type, or the present value
switch keyBits { // if keyBits is non-zero.
func DefaultOrValueKeyBits(keyType string, keyBits int) (int, error) {
if keyBits == 0 {
newValue, present := defaultAlgorithmKeyBits[keyType]
if present {
keyBits = newValue
} /* else {
// We cannot return an error here as ed25519 (and potentially ed448
// in the future) aren't in defaultAlgorithmKeyBits -- the value of
// the keyBits parameter is ignored under that algorithm.
} */
}
return keyBits, nil
}
// Returns default signature hash bit length for the specified key type and
// bits, or the present value if hashBits is non-zero. Returns an error under
// certain internal circumstances.
func DefaultOrValueHashBits(keyType string, keyBits int, hashBits int) (int, error) {
if keyType == "ec" {
// To comply with BSI recommendations Section 4.2 and Mozilla root
// store policy section 5.1.2, enforce that NIST P-curves use a hash
// length corresponding to curve length. Note that ed25519 does not use
// the "ec" key type.
expectedHashBits := expectedNISTPCurveHashBits[keyBits]
if expectedHashBits != hashBits && hashBits != 0 {
return hashBits, fmt.Errorf("unsupported signature hash algorithm length (%d) for NIST P-%d", hashBits, keyBits)
} else if hashBits == 0 {
hashBits = expectedHashBits
}
} else if keyType == "rsa" && hashBits == 0 {
// To match previous behavior (and ignoring NIST's recommendations for
// hash size to align with RSA key sizes), default to SHA-2-256.
hashBits = 256
} else if keyType == "ed25519" || keyType == "ed448" {
// No-op; ed25519 and ed448 internally specify their own hash and
// we do not need to select one. Double hashing isn't supported in
// certificate signing and we must
return 0, nil
}
return hashBits, nil
}
// Validates that the combination of keyType, keyBits, and hashBits are
// valid together; replaces individual calls to ValidateSignatureLength and
// ValidateKeyTypeLength. Also updates the value of keyBits and hashBits on
// return.
func ValidateDefaultOrValueKeyTypeSignatureLength(keyType string, keyBits int, hashBits int) (int, int, error) {
var err error
if keyBits, err = DefaultOrValueKeyBits(keyType, keyBits); err != nil {
return keyBits, hashBits, err
}
if err = ValidateKeyTypeLength(keyType, keyBits); err != nil {
return keyBits, hashBits, err
}
if hashBits, err = DefaultOrValueHashBits(keyType, keyBits, hashBits); err != nil {
return keyBits, hashBits, err
}
// Note that this check must come after we've selected a value for
// hashBits above, in the event it was left as the default, but we
// were allowed to update it.
if err = ValidateSignatureLength(keyType, hashBits); err != nil {
return keyBits, hashBits, err
}
return keyBits, hashBits, nil
}
// Validates that the length of the hash (in bits) used in the signature
// calculation is a known, approved value.
func ValidateSignatureLength(keyType string, hashBits int) error {
if keyType == "ed25519" || keyType == "ed448" {
// ed25519 and ed448 include built-in hashing and is not externally
// configurable. There are three modes for each of these schemes:
//
// 1. Built-in hash (default, used in TLS, x509).
// 2. Double hash (notably used in some block-chain implementations,
// but largely regarded as a specialized use case with security
// concerns).
// 3. No hash (bring your own hash function, less commonly used).
//
// In all cases, we won't have a hash algorithm to validate here, so
// return nil.
return nil
}
switch hashBits {
case 256: case 256:
case 384: case 384:
case 512: case 512:
default: default:
return fmt.Errorf("unsupported signature algorithm: %d", keyBits) return fmt.Errorf("unsupported hash signature algorithm: %d", hashBits)
} }
return nil return nil
} }
func ValidateKeyTypeLength(keyType string, keyBits int) error { func ValidateKeyTypeLength(keyType string, keyBits int) error {
switch keyType { switch keyType {
case "rsa": case "rsa":
if keyBits < rsaMinimumSecureKeySize {
return fmt.Errorf("RSA keys < %d bits are unsafe and not supported: got %d", rsaMinimumSecureKeySize, keyBits)
}
switch keyBits { switch keyBits {
case 2048: case 2048:
case 3072: case 3072:
@ -548,12 +684,8 @@ func ValidateKeyTypeLength(keyType string, keyBits int) error {
return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits) return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits)
} }
case "ec": case "ec":
switch keyBits { _, present := expectedNISTPCurveHashBits[keyBits]
case 224: if !present {
case 256:
case 384:
case 521:
default:
return fmt.Errorf("unsupported bit length for EC key: %d", keyBits) return fmt.Errorf("unsupported bit length for EC key: %d", keyBits)
} }
case "any", "ed25519": case "any", "ed25519":
@ -567,16 +699,23 @@ func ValidateKeyTypeLength(keyType string, keyBits int) error {
// CreateCertificate uses CreationBundle and the default rand.Reader to // CreateCertificate uses CreationBundle and the default rand.Reader to
// generate a cert/keypair. // generate a cert/keypair.
func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) {
return createCertificate(data, rand.Reader) return createCertificate(data, rand.Reader, generatePrivateKey)
} }
// CreateCertificateWithRandomSource uses CreationBundle and a custom // CreateCertificateWithRandomSource uses CreationBundle and a custom
// io.Reader for randomness to generate a cert/keypair. // io.Reader for randomness to generate a cert/keypair.
func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
return createCertificate(data, randReader) return createCertificate(data, randReader, generatePrivateKey)
} }
func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { // KeyGenerator allows us to override how/what generates the private key
type KeyGenerator func(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error
func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCertBundle, error) {
return createCertificate(data, randReader, keyGenerator)
}
func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) {
var err error var err error
result := &ParsedCertBundle{} result := &ParsedCertBundle{}
@ -585,7 +724,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB
return nil, err return nil, err
} }
if err := generatePrivateKey(data.Params.KeyType, if err := privateKeyGenerator(data.Params.KeyType,
data.Params.KeyBits, data.Params.KeyBits,
result, randReader); err != nil { result, randReader); err != nil {
return nil, err return nil, err
@ -655,14 +794,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB
case Ed25519PrivateKey: case Ed25519PrivateKey:
certTemplate.SignatureAlgorithm = x509.PureEd25519 certTemplate.SignatureAlgorithm = x509.PureEd25519
case ECPrivateKey: case ECPrivateKey:
switch data.Params.SignatureBits { certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(data.SigningBundle.PrivateKey.Public(), data.Params.SignatureBits)
case 256:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
case 384:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384
case 512:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512
}
} }
caCert := data.SigningBundle.Certificate caCert := data.SigningBundle.Certificate
@ -691,14 +823,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB
case "ed25519": case "ed25519":
certTemplate.SignatureAlgorithm = x509.PureEd25519 certTemplate.SignatureAlgorithm = x509.PureEd25519
case "ec": case "ec":
switch data.Params.SignatureBits { certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits)
case 256:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
case 384:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384
case 512:
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512
}
} }
certTemplate.AuthorityKeyId = subjKeyID certTemplate.AuthorityKeyId = subjKeyID
@ -733,26 +858,59 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB
return result, nil return result, nil
} }
func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x509.SignatureAlgorithm {
// If signature bits are configured, prefer them to the default choice.
switch signatureBits {
case 256:
return x509.ECDSAWithSHA256
case 384:
return x509.ECDSAWithSHA384
case 512:
return x509.ECDSAWithSHA512
}
key, ok := pub.(*ecdsa.PublicKey)
if !ok {
return x509.ECDSAWithSHA256
}
switch key.Curve {
case elliptic.P224(), elliptic.P256():
return x509.ECDSAWithSHA256
case elliptic.P384():
return x509.ECDSAWithSHA384
case elliptic.P521():
return x509.ECDSAWithSHA512
default:
return x509.ECDSAWithSHA256
}
}
var oidExtensionBasicConstraints = []int{2, 5, 29, 19} var oidExtensionBasicConstraints = []int{2, 5, 29, 19}
// CreateCSR creates a CSR with the default rand.Reader to // CreateCSR creates a CSR with the default rand.Reader to
// generate a cert/keypair. This is currently only meant // generate a cert/keypair. This is currently only meant
// for use when generating an intermediate certificate. // for use when generating an intermediate certificate.
func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) {
return createCSR(data, addBasicConstraints, rand.Reader) return createCSR(data, addBasicConstraints, rand.Reader, generatePrivateKey)
} }
// CreateCSRWithRandomSource creates a CSR with a custom io.Reader // CreateCSRWithRandomSource creates a CSR with a custom io.Reader
// for randomness to generate a cert/keypair. // for randomness to generate a cert/keypair.
func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) {
return createCSR(data, addBasicConstraints, randReader) return createCSR(data, addBasicConstraints, randReader, generatePrivateKey)
} }
func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { // CreateCSRWithKeyGenerator creates a CSR with a custom io.Reader
// for randomness to generate a cert/keypair with the provided private key generator.
func CreateCSRWithKeyGenerator(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) {
return createCSR(data, addBasicConstraints, randReader, keyGenerator)
}
func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) {
var err error var err error
result := &ParsedCSRBundle{} result := &ParsedCSRBundle{}
if err := generatePrivateKey(data.Params.KeyType, if err := keyGenerator(data.Params.KeyType,
data.Params.KeyBits, data.Params.KeyBits,
result, randReader); err != nil { result, randReader); err != nil {
return nil, err return nil, err
@ -849,11 +1007,10 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun
return nil, err return nil, err
} }
marshaledKey, err := x509.MarshalPKIXPublicKey(data.CSR.PublicKey) subjKeyID, err := getSubjectKeyID(data.CSR.PublicKey)
if err != nil { if err != nil {
return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} return nil, err
} }
subjKeyID := sha1.Sum(marshaledKey)
caCert := data.SigningBundle.Certificate caCert := data.SigningBundle.Certificate
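A short sketch of the new defaulting/validation helper in the certutil package, showing how zero values are filled in and how undersized RSA keys are rejected; imports assumed: fmt, log, and certutil "github.com/hashicorp/vault/sdk/helper/certutil".

func checkKeyAndHashDefaults() {
	// Zeroes ask the helper for defaults: "ec" defaults to 256-bit (P-256) keys,
	// and NIST P-curves get a signature hash length matching the curve.
	keyBits, hashBits, err := certutil.ValidateDefaultOrValueKeyTypeSignatureLength("ec", 0, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keyBits, hashBits) // 256 256

	// RSA keys below 2048 bits are now rejected outright.
	_, _, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength("rsa", 1024, 0)
	fmt.Println(err != nil) // true
}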


@ -58,6 +58,7 @@ const (
RSAPrivateKey PrivateKeyType = "rsa" RSAPrivateKey PrivateKeyType = "rsa"
ECPrivateKey PrivateKeyType = "ec" ECPrivateKey PrivateKeyType = "ec"
Ed25519PrivateKey PrivateKeyType = "ed25519" Ed25519PrivateKey PrivateKeyType = "ed25519"
ManagedPrivateKey PrivateKeyType = "ManagedPrivateKey"
) )
// TLSUsage controls whether the intended usage of a *tls.Config // TLSUsage controls whether the intended usage of a *tls.Config
@ -158,46 +159,21 @@ func (c *CertBundle) ToPEMBundle() string {
// ToParsedCertBundle converts a string-based certificate bundle // ToParsedCertBundle converts a string-based certificate bundle
// to a byte-based raw certificate bundle // to a byte-based raw certificate bundle
func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
result := &ParsedCertBundle{} return c.ToParsedCertBundleWithExtractor(extractAndSetPrivateKey)
}
// PrivateKeyExtractor extract out a private key from the passed in
// CertBundle and set the appropriate bits within the ParsedCertBundle.
type PrivateKeyExtractor func(c *CertBundle, parsedBundle *ParsedCertBundle) error
func (c *CertBundle) ToParsedCertBundleWithExtractor(privateKeyExtractor PrivateKeyExtractor) (*ParsedCertBundle, error) {
var err error var err error
var pemBlock *pem.Block var pemBlock *pem.Block
result := &ParsedCertBundle{}
if len(c.PrivateKey) > 0 { err = privateKeyExtractor(c, result)
pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) if err != nil {
if pemBlock == nil { return nil, err
return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"}
}
result.PrivateKeyBytes = pemBlock.Bytes
result.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type))
switch result.PrivateKeyFormat {
case ECBlock:
result.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey
case PKCS1Block:
c.PrivateKeyType, result.PrivateKeyType = RSAPrivateKey, RSAPrivateKey
case PKCS8Block:
t, err := getPKCS8Type(pemBlock.Bytes)
if err != nil {
return nil, errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)}
}
result.PrivateKeyType = t
switch t {
case ECPrivateKey:
c.PrivateKeyType = ECPrivateKey
case RSAPrivateKey:
c.PrivateKeyType = RSAPrivateKey
case Ed25519PrivateKey:
c.PrivateKeyType = Ed25519PrivateKey
}
default:
return nil, errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)}
}
result.PrivateKey, err = result.getSigner()
if err != nil {
return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
}
} }
if len(c.Certificate) > 0 { if len(c.Certificate) > 0 {
@ -258,6 +234,52 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
return result, nil return result, nil
} }
func extractAndSetPrivateKey(c *CertBundle, parsedBundle *ParsedCertBundle) error {
if len(c.PrivateKey) == 0 {
return nil
}
pemBlock, _ := pem.Decode([]byte(c.PrivateKey))
if pemBlock == nil {
return errutil.UserError{Err: "Error decoding private key from cert bundle"}
}
parsedBundle.PrivateKeyBytes = pemBlock.Bytes
parsedBundle.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type))
switch parsedBundle.PrivateKeyFormat {
case ECBlock:
parsedBundle.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey
case PKCS1Block:
c.PrivateKeyType, parsedBundle.PrivateKeyType = RSAPrivateKey, RSAPrivateKey
case PKCS8Block:
t, err := getPKCS8Type(pemBlock.Bytes)
if err != nil {
return errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)}
}
parsedBundle.PrivateKeyType = t
switch t {
case ECPrivateKey:
c.PrivateKeyType = ECPrivateKey
case RSAPrivateKey:
c.PrivateKeyType = RSAPrivateKey
case Ed25519PrivateKey:
c.PrivateKeyType = Ed25519PrivateKey
case ManagedPrivateKey:
c.PrivateKeyType = ManagedPrivateKey
}
default:
return errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)}
}
var err error
parsedBundle.PrivateKey, err = parsedBundle.getSigner()
if err != nil {
return errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
}
return nil
}
// ToCertBundle converts a byte-based raw DER certificate bundle // ToCertBundle converts a byte-based raw DER certificate bundle
// to a PEM-based string certificate bundle // to a PEM-based string certificate bundle
func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) {
@ -505,6 +527,9 @@ func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) {
case Ed25519PrivateKey: case Ed25519PrivateKey:
result.PrivateKeyType = "ed25519" result.PrivateKeyType = "ed25519"
block.Type = "PRIVATE KEY" block.Type = "PRIVATE KEY"
case ManagedPrivateKey:
result.PrivateKeyType = ManagedPrivateKey
block.Type = "PRIVATE KEY"
default: default:
return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"} return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"}
} }
@ -613,7 +638,6 @@ func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) {
if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 {
tlsConfig.Certificates = []tls.Certificate{tlsCert} tlsConfig.Certificates = []tls.Certificate{tlsCert}
tlsConfig.BuildNameToCertificate()
} }
return tlsConfig, nil return tlsConfig, nil
@ -663,6 +687,21 @@ func (b *CAInfoBundle) GetCAChain() []*CertBlock {
return chain return chain
} }
func (b *CAInfoBundle) GetFullChain() []*CertBlock {
var chain []*CertBlock
chain = append(chain, &CertBlock{
Certificate: b.Certificate,
Bytes: b.CertificateBytes,
})
if len(b.CAChain) > 0 {
chain = append(chain, b.CAChain...)
}
return chain
}
type CertExtKeyUsage int type CertExtKeyUsage int
const ( const (
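A sketch of plugging a custom PrivateKeyExtractor into the new ToParsedCertBundleWithExtractor hook, for instance when the private key lives outside the bundle (such as in an external KMS); the no-op extractor below is hypothetical. Import assumed: certutil "github.com/hashicorp/vault/sdk/helper/certutil".

// parseCertOnly parses the certificate and CA chain but deliberately skips
// private-key extraction, leaving the key fields of the parsed bundle unset.
func parseCertOnly(bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) {
	skipKey := func(_ *certutil.CertBundle, _ *certutil.ParsedCertBundle) error {
		return nil
	}
	return bundle.ToParsedCertBundleWithExtractor(skipKey)
}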


@ -141,10 +141,21 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
// If the first byte isn't a canary byte, then the utility returns a boolean // If the first byte isn't a canary byte, then the utility returns a boolean
// value indicating that the input was not compressed. // value indicating that the input was not compressed.
func Decompress(data []byte) ([]byte, bool, error) { func Decompress(data []byte) ([]byte, bool, error) {
bytes, _, notCompressed, err := DecompressWithCanary(data)
return bytes, notCompressed, err
}
// DecompressWithCanary checks if the first byte in the input matches the canary byte.
// If the first byte is a canary byte, then the input past the canary byte
// will be decompressed using the method specified in the given configuration. The type of compression used is also
// returned. If the first byte isn't a canary byte, then the utility returns a boolean
// value indicating that the input was not compressed.
func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
var err error var err error
var reader io.ReadCloser var reader io.ReadCloser
var compressionType string
if data == nil || len(data) == 0 { if data == nil || len(data) == 0 {
return nil, false, fmt.Errorf("'data' being decompressed is empty") return nil, "", false, fmt.Errorf("'data' being decompressed is empty")
} }
canary := data[0] canary := data[0]
@ -155,43 +166,47 @@ func Decompress(data []byte) ([]byte, bool, error) {
// byte and try to decompress the data that is after the canary. // byte and try to decompress the data that is after the canary.
case CompressionCanaryGzip: case CompressionCanaryGzip:
if len(data) < 2 { if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary") return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
} }
reader, err = gzip.NewReader(bytes.NewReader(cData)) reader, err = gzip.NewReader(bytes.NewReader(cData))
compressionType = CompressionTypeGzip
case CompressionCanaryLZW: case CompressionCanaryLZW:
if len(data) < 2 { if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary") return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
} }
reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8) reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
compressionType = CompressionTypeLZW
case CompressionCanarySnappy: case CompressionCanarySnappy:
if len(data) < 2 { if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary") return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
} }
reader = &CompressUtilReadCloser{ reader = &CompressUtilReadCloser{
Reader: snappy.NewReader(bytes.NewReader(cData)), Reader: snappy.NewReader(bytes.NewReader(cData)),
} }
compressionType = CompressionTypeSnappy
case CompressionCanaryLZ4: case CompressionCanaryLZ4:
if len(data) < 2 { if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary") return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
} }
reader = &CompressUtilReadCloser{ reader = &CompressUtilReadCloser{
Reader: lz4.NewReader(bytes.NewReader(cData)), Reader: lz4.NewReader(bytes.NewReader(cData)),
} }
compressionType = CompressionTypeLZ4
default: default:
// If the first byte doesn't match the canary byte, it means // If the first byte doesn't match the canary byte, it means
// that the content was not compressed at all. Indicate the // that the content was not compressed at all. Indicate the
// caller that the input was not compressed. // caller that the input was not compressed.
return nil, true, nil return nil, "", true, nil
} }
if err != nil { if err != nil {
return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
} }
if reader == nil { if reader == nil {
return nil, false, fmt.Errorf("failed to create a compression reader") return nil, "", false, fmt.Errorf("failed to create a compression reader")
} }
// Close the io.ReadCloser // Close the io.ReadCloser
@ -200,8 +215,8 @@ func Decompress(data []byte) ([]byte, bool, error) {
// Read all the compressed data into a buffer // Read all the compressed data into a buffer
var buf bytes.Buffer var buf bytes.Buffer
if _, err = io.Copy(&buf, reader); err != nil { if _, err = io.Copy(&buf, reader); err != nil {
return nil, false, err return nil, "", false, err
} }
return buf.Bytes(), false, nil return buf.Bytes(), compressionType, false, nil
} }
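A round-trip sketch of the new DecompressWithCanary variant, which also reports the compression type named by the canary byte; imports assumed: fmt, log, and compressutil "github.com/hashicorp/vault/sdk/helper/compressutil".

func compressRoundTrip() {
	payload := []byte(`{"example":"data"}`)

	compressed, err := compressutil.Compress(payload, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeSnappy,
	})
	if err != nil {
		log.Fatal(err)
	}

	plain, compressionType, notCompressed, err := compressutil.DecompressWithCanary(compressed)
	if err != nil {
		log.Fatal(err)
	}
	// Prints the original payload, the detected type ("snappy") and false.
	fmt.Println(string(plain), compressionType, notCompressed)
}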


@ -3,3 +3,10 @@ package consts
// AgentPathCacheClear is the path that the agent will use as its cache-clear // AgentPathCacheClear is the path that the agent will use as its cache-clear
// endpoint. // endpoint.
const AgentPathCacheClear = "/agent/v1/cache-clear" const AgentPathCacheClear = "/agent/v1/cache-clear"
// AgentPathMetrics is the path that the agent will use to expose its internal
// metrics.
const AgentPathMetrics = "/agent/v1/metrics"
// AgentPathQuit is the path that the agent will use to trigger stopping it.
const AgentPathQuit = "/agent/v1/quit"


@ -0,0 +1,10 @@
package consts
const (
ServiceTokenPrefix = "hvs."
BatchTokenPrefix = "hvb."
RecoveryTokenPrefix = "hvr."
LegacyServiceTokenPrefix = "s."
LegacyBatchTokenPrefix = "b."
LegacyRecoveryTokenPrefix = "r."
)
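An illustrative helper showing how these prefix constants might be used to classify a token string; the function name is hypothetical. Imports assumed: strings, and consts "github.com/hashicorp/vault/sdk/helper/consts".

// isServiceToken reports whether a token looks like a service token, covering
// both the newer hvs. prefix and the legacy s. prefix.
func isServiceToken(token string) bool {
	return strings.HasPrefix(token, consts.ServiceTokenPrefix) ||
		strings.HasPrefix(token, consts.LegacyServiceTokenPrefix)
}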


@ -0,0 +1,47 @@
package pluginutil
import (
context "context"
"fmt"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
type PluginMultiplexingServerImpl struct {
UnimplementedPluginMultiplexingServer
Supported bool
}
func (pm PluginMultiplexingServerImpl) MultiplexingSupport(ctx context.Context, req *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) {
return &MultiplexingSupportResponse{
Supported: pm.Supported,
}, nil
}
func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface) (bool, error) {
if cc == nil {
return false, fmt.Errorf("client connection is nil")
}
req := new(MultiplexingSupportRequest)
resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req)
if err != nil {
// If the server does not implement the multiplexing server then we can
// assume it is not multiplexed
if status.Code(err) == codes.Unimplemented {
return false, nil
}
return false, err
}
if resp == nil {
// Somehow got a nil response, assume not multiplexed
return false, nil
}
return resp.Supported, nil
}
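An illustrative sketch of querying a plugin connection for multiplexing support; the dial target is a placeholder (real connections come from the plugin framework), and the snippet assumes the context and log imports plus grpc "google.golang.org/grpc" and pluginutil "github.com/hashicorp/vault/sdk/helper/pluginutil".

func checkMultiplexing() {
	// Placeholder target; in practice the connection is the plugin's gRPC conn.
	conn, err := grpc.Dial("unix:///tmp/vault-plugin.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Plugins that do not implement the multiplexing service report false
	// rather than returning an error.
	supported, err := pluginutil.MultiplexingSupported(context.Background(), conn)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("multiplexing supported: %v", supported)
}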


@ -0,0 +1,213 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.19.3
// source: sdk/helper/pluginutil/multiplexing.proto
package pluginutil
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type MultiplexingSupportRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *MultiplexingSupportRequest) Reset() {
*x = MultiplexingSupportRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MultiplexingSupportRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MultiplexingSupportRequest) ProtoMessage() {}
func (x *MultiplexingSupportRequest) ProtoReflect() protoreflect.Message {
mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MultiplexingSupportRequest.ProtoReflect.Descriptor instead.
func (*MultiplexingSupportRequest) Descriptor() ([]byte, []int) {
return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{0}
}
type MultiplexingSupportResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Supported bool `protobuf:"varint,1,opt,name=supported,proto3" json:"supported,omitempty"`
}
func (x *MultiplexingSupportResponse) Reset() {
*x = MultiplexingSupportResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MultiplexingSupportResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MultiplexingSupportResponse) ProtoMessage() {}
func (x *MultiplexingSupportResponse) ProtoReflect() protoreflect.Message {
mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MultiplexingSupportResponse.ProtoReflect.Descriptor instead.
func (*MultiplexingSupportResponse) Descriptor() ([]byte, []int) {
return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{1}
}
func (x *MultiplexingSupportResponse) GetSupported() bool {
if x != nil {
return x.Supported
}
return false
}
var File_sdk_helper_pluginutil_multiplexing_proto protoreflect.FileDescriptor
var file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = []byte{
0x0a, 0x28, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75,
0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
0x78, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x70, 0x6c, 0x75, 0x67,
0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78,
0x69, 0x6e, 0x67, 0x22, 0x1c, 0x0a, 0x1a, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78,
0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x22, 0x3b, 0x0a, 0x1b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e,
0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x32, 0x97,
0x01, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
0x65, 0x78, 0x69, 0x6e, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x13, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70,
0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x2e,
0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69,
0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x34, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e,
0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c,
0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65,
0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce sync.Once
file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = file_sdk_helper_pluginutil_multiplexing_proto_rawDesc
)
func file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP() []byte {
file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce.Do(func() {
file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_pluginutil_multiplexing_proto_rawDescData)
})
return file_sdk_helper_pluginutil_multiplexing_proto_rawDescData
}
var file_sdk_helper_pluginutil_multiplexing_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []interface{}{
(*MultiplexingSupportRequest)(nil), // 0: pluginutil.multiplexing.MultiplexingSupportRequest
(*MultiplexingSupportResponse)(nil), // 1: pluginutil.multiplexing.MultiplexingSupportResponse
}
var file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = []int32{
0, // 0: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:input_type -> pluginutil.multiplexing.MultiplexingSupportRequest
1, // 1: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:output_type -> pluginutil.multiplexing.MultiplexingSupportResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_sdk_helper_pluginutil_multiplexing_proto_init() }
func file_sdk_helper_pluginutil_multiplexing_proto_init() {
if File_sdk_helper_pluginutil_multiplexing_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MultiplexingSupportRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MultiplexingSupportResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_sdk_helper_pluginutil_multiplexing_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_sdk_helper_pluginutil_multiplexing_proto_goTypes,
DependencyIndexes: file_sdk_helper_pluginutil_multiplexing_proto_depIdxs,
MessageInfos: file_sdk_helper_pluginutil_multiplexing_proto_msgTypes,
}.Build()
File_sdk_helper_pluginutil_multiplexing_proto = out.File
file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = nil
file_sdk_helper_pluginutil_multiplexing_proto_goTypes = nil
file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = nil
}


@ -0,0 +1,13 @@
syntax = "proto3";
package pluginutil.multiplexing;
option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil";
message MultiplexingSupportRequest {}
message MultiplexingSupportResponse {
bool supported = 1;
}
service PluginMultiplexing {
rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse);
}
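For orientation, a minimal plugin-side sketch of serving this new RPC with the generated bindings above. The listener address and the standalone main are illustrative assumptions, not part of this change; in Vault the server is normally wired up by go-plugin rather than by hand.

package main

import (
    "context"
    "log"
    "net"

    "github.com/hashicorp/vault/sdk/helper/pluginutil"
    "google.golang.org/grpc"
)

// multiplexingServer answers the capability probe. Embedding the generated
// Unimplemented type keeps the implementation forward compatible.
type multiplexingServer struct {
    pluginutil.UnimplementedPluginMultiplexingServer
}

func (multiplexingServer) MultiplexingSupport(ctx context.Context, req *pluginutil.MultiplexingSupportRequest) (*pluginutil.MultiplexingSupportResponse, error) {
    // Report that this plugin process can serve multiple mounts.
    return &pluginutil.MultiplexingSupportResponse{Supported: true}, nil
}

func main() {
    lis, err := net.Listen("tcp", "127.0.0.1:0") // illustrative listener only
    if err != nil {
        log.Fatal(err)
    }
    s := grpc.NewServer()
    pluginutil.RegisterPluginMultiplexingServer(s, multiplexingServer{})
    log.Fatal(s.Serve(lis))
}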


@ -0,0 +1,101 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package pluginutil
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// PluginMultiplexingClient is the client API for PluginMultiplexing service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type PluginMultiplexingClient interface {
MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error)
}
type pluginMultiplexingClient struct {
cc grpc.ClientConnInterface
}
func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexingClient {
return &pluginMultiplexingClient{cc}
}
func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) {
out := new(MultiplexingSupportResponse)
err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// PluginMultiplexingServer is the server API for PluginMultiplexing service.
// All implementations must embed UnimplementedPluginMultiplexingServer
// for forward compatibility
type PluginMultiplexingServer interface {
MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error)
mustEmbedUnimplementedPluginMultiplexingServer()
}
// UnimplementedPluginMultiplexingServer must be embedded to have forward compatible implementations.
type UnimplementedPluginMultiplexingServer struct {
}
func (UnimplementedPluginMultiplexingServer) MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method MultiplexingSupport not implemented")
}
func (UnimplementedPluginMultiplexingServer) mustEmbedUnimplementedPluginMultiplexingServer() {}
// UnsafePluginMultiplexingServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to PluginMultiplexingServer will
// result in compilation errors.
type UnsafePluginMultiplexingServer interface {
mustEmbedUnimplementedPluginMultiplexingServer()
}
func RegisterPluginMultiplexingServer(s grpc.ServiceRegistrar, srv PluginMultiplexingServer) {
s.RegisterService(&PluginMultiplexing_ServiceDesc, srv)
}
func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MultiplexingSupportRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest))
}
return interceptor(ctx, in, info, handler)
}
// PluginMultiplexing_ServiceDesc is the grpc.ServiceDesc for PluginMultiplexing service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var PluginMultiplexing_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pluginutil.multiplexing.PluginMultiplexing",
HandlerType: (*PluginMultiplexingServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "MultiplexingSupport",
Handler: _PluginMultiplexing_MultiplexingSupport_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "sdk/helper/pluginutil/multiplexing.proto",
}
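And a corresponding caller-side sketch that probes a plugin for multiplexing support. The dial target is hypothetical; in practice the gRPC connection comes out of the go-plugin handshake rather than a manual grpc.Dial.

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/hashicorp/vault/sdk/helper/pluginutil"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Hypothetical address for illustration only.
    conn, err := grpc.Dial("127.0.0.1:3001", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    // Ask the plugin whether it can serve multiple mounts from one process.
    resp, err := pluginutil.NewPluginMultiplexingClient(conn).
        MultiplexingSupport(ctx, &pluginutil.MultiplexingSupportRequest{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("multiplexing supported:", resp.GetSupported())
}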


@ -9,9 +9,21 @@ import (
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-plugin"
+"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/version"
)
type PluginClientConfig struct {
Name string
PluginType consts.PluginType
PluginSets map[int]plugin.PluginSet
HandshakeConfig plugin.HandshakeConfig
Logger log.Logger
IsMetadataMode bool
AutoMTLS bool
MLock bool
}
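A rough sketch of filling in the new PluginClientConfig, which is what the NewPluginClient method introduced later in this diff consumes. The plugin name, protocol version, and handshake cookie below are placeholders.

package main

import (
    "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-plugin"
    "github.com/hashicorp/vault/sdk/helper/consts"
    "github.com/hashicorp/vault/sdk/helper/pluginutil"
)

func exampleClientConfig() pluginutil.PluginClientConfig {
    return pluginutil.PluginClientConfig{
        Name:       "my-secrets-plugin", // placeholder plugin name
        PluginType: consts.PluginTypeSecrets,
        PluginSets: map[int]plugin.PluginSet{
            // Protocol version 5 and the empty set are examples only.
            5: {},
        },
        HandshakeConfig: plugin.HandshakeConfig{
            ProtocolVersion:  5,
            MagicCookieKey:   "EXAMPLE_MAGIC_COOKIE", // placeholder cookie
            MagicCookieValue: "example",
        },
        Logger:   hclog.Default().Named("plugin"),
        AutoMTLS: true,
    }
}

func main() {
    cfg := exampleClientConfig()
    hclog.Default().Info("built plugin client config", "name", cfg.Name)
}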
type runConfig struct {
// Provided by PluginRunner
command string
@ -21,12 +33,9 @@ type runConfig struct {
// Initialized with what's in PluginRunner.Env, but can be added to
env []string
wrapper RunnerUtil
-pluginSets map[int]plugin.PluginSet
-hs plugin.HandshakeConfig
-logger log.Logger
-isMetadataMode bool
-autoMTLS bool
+PluginClientConfig
}
func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) {
@ -34,19 +43,19 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
cmd.Env = append(cmd.Env, rc.env...)
// Add the mlock setting to the ENV of the plugin
-if rc.wrapper != nil && rc.wrapper.MlockEnabled() {
+if rc.MLock || (rc.wrapper != nil && rc.wrapper.MlockEnabled()) {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
}
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version))
-if rc.isMetadataMode {
-rc.logger = rc.logger.With("metadata", "true")
+if rc.IsMetadataMode {
+rc.Logger = rc.Logger.With("metadata", "true")
}
-metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.isMetadataMode)
+metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode)
cmd.Env = append(cmd.Env, metadataEnv)
var clientTLSConfig *tls.Config
-if !rc.autoMTLS && !rc.isMetadataMode {
+if !rc.AutoMTLS && !rc.IsMetadataMode {
// Get a CA TLS Certificate
certBytes, key, err := generateCert()
if err != nil {
@ -76,17 +85,17 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
}
clientConfig := &plugin.ClientConfig{
-HandshakeConfig: rc.hs,
-VersionedPlugins: rc.pluginSets,
+HandshakeConfig: rc.HandshakeConfig,
+VersionedPlugins: rc.PluginSets,
Cmd: cmd,
SecureConfig: secureConfig,
TLSConfig: clientTLSConfig,
-Logger: rc.logger,
+Logger: rc.Logger,
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC,
plugin.ProtocolGRPC,
},
-AutoMTLS: rc.autoMTLS,
+AutoMTLS: rc.AutoMTLS,
}
return clientConfig, nil
}
@ -117,31 +126,37 @@ func Runner(wrapper RunnerUtil) RunOpt {
func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt {
return func(rc *runConfig) {
-rc.pluginSets = pluginSets
+rc.PluginSets = pluginSets
}
}
func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt {
return func(rc *runConfig) {
-rc.hs = hs
+rc.HandshakeConfig = hs
}
}
func Logger(logger log.Logger) RunOpt {
return func(rc *runConfig) {
-rc.logger = logger
+rc.Logger = logger
}
}
func MetadataMode(isMetadataMode bool) RunOpt {
return func(rc *runConfig) {
-rc.isMetadataMode = isMetadataMode
+rc.IsMetadataMode = isMetadataMode
}
}
func AutoMTLS(autoMTLS bool) RunOpt {
return func(rc *runConfig) {
-rc.autoMTLS = autoMTLS
+rc.AutoMTLS = autoMTLS
}
}
+func MLock(mlock bool) RunOpt {
+return func(rc *runConfig) {
+rc.MLock = mlock
+}
+}
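The exported RunOpt helpers above (including the new MLock option) now write through to the embedded PluginClientConfig. A short sketch of how a caller might compose them before handing them to the runner's RunConfig entry point; the handshake and plugin-set values are placeholders.

package main

import (
    "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-plugin"
    "github.com/hashicorp/vault/sdk/helper/pluginutil"
)

func main() {
    logger := hclog.Default().Named("plugin-runner")

    // Options a caller would hand to the plugin runner; values are examples.
    opts := []pluginutil.RunOpt{
        pluginutil.HandshakeConfig(plugin.HandshakeConfig{
            ProtocolVersion:  5,
            MagicCookieKey:   "EXAMPLE_MAGIC_COOKIE",
            MagicCookieValue: "example",
        }),
        pluginutil.PluginSets(map[int]plugin.PluginSet{5: {}}),
        pluginutil.Logger(logger),
        pluginutil.MetadataMode(false),
        pluginutil.AutoMTLS(true),
        pluginutil.MLock(true), // new option added in this SDK version
    }

    logger.Info("assembled plugin run options", "count", len(opts))
}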


@ -8,6 +8,7 @@ import (
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/wrapping"
+"google.golang.org/grpc"
)
// Looker defines the plugin Lookup function that looks into the plugin catalog
@ -21,6 +22,7 @@ type Looker interface {
// configuration and wrapping data in a response wrapped token.
// logical.SystemView implementations satisfy this interface.
type RunnerUtil interface {
+NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error)
ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
MlockEnabled() bool
}
@ -31,6 +33,13 @@ type LookRunnerUtil interface {
RunnerUtil
}
+type PluginClient interface {
+Conn() grpc.ClientConnInterface
+plugin.ClientProtocol
+}
+const MultiplexingCtxKey string = "multiplex_id"
// PluginRunner defines the metadata needed to run a plugin securely with
// go-plugin.
type PluginRunner struct {


@ -83,8 +83,6 @@ func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config
MinVersion: tls.VersionTLS12,
}
-tlsConfig.BuildNameToCertificate()
return tlsConfig, nil
}


@ -100,6 +100,9 @@ type Auth struct {
// Orphan is set if the token does not have a parent
Orphan bool `json:"orphan"`
+// MFARequirement
+MFARequirement *MFARequirement `json:"mfa_requirement"`
}
func (a *Auth) GoString() string {


@ -10,6 +10,9 @@ type Connection struct {
// RemoteAddr is the network address that sent the request.
RemoteAddr string `json:"remote_addr"`
+// RemotePort is the network port that sent the request.
+RemotePort int `json:"remote_port"`
// ConnState is the TLS connection state if applicable.
ConnState *tls.ConnectionState `sentinel:""`
}


@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
-// protoc v3.17.3
+// protoc v3.19.3
// source: sdk/logical/identity.proto
package logical
@ -310,6 +310,171 @@ func (x *Group) GetNamespaceID() string {
return ""
}
type MFAMethodID struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"`
}
func (x *MFAMethodID) Reset() {
*x = MFAMethodID{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_logical_identity_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MFAMethodID) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MFAMethodID) ProtoMessage() {}
func (x *MFAMethodID) ProtoReflect() protoreflect.Message {
mi := &file_sdk_logical_identity_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MFAMethodID.ProtoReflect.Descriptor instead.
func (*MFAMethodID) Descriptor() ([]byte, []int) {
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{3}
}
func (x *MFAMethodID) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *MFAMethodID) GetID() string {
if x != nil {
return x.ID
}
return ""
}
func (x *MFAMethodID) GetUsesPasscode() bool {
if x != nil {
return x.UsesPasscode
}
return false
}
type MFAConstraintAny struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Any []*MFAMethodID `protobuf:"bytes,1,rep,name=any,proto3" json:"any,omitempty"`
}
func (x *MFAConstraintAny) Reset() {
*x = MFAConstraintAny{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_logical_identity_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MFAConstraintAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MFAConstraintAny) ProtoMessage() {}
func (x *MFAConstraintAny) ProtoReflect() protoreflect.Message {
mi := &file_sdk_logical_identity_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MFAConstraintAny.ProtoReflect.Descriptor instead.
func (*MFAConstraintAny) Descriptor() ([]byte, []int) {
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{4}
}
func (x *MFAConstraintAny) GetAny() []*MFAMethodID {
if x != nil {
return x.Any
}
return nil
}
type MFARequirement struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
MFARequestID string `protobuf:"bytes,1,opt,name=mfa_request_id,json=mfaRequestId,proto3" json:"mfa_request_id,omitempty"`
MFAConstraints map[string]*MFAConstraintAny `protobuf:"bytes,2,rep,name=mfa_constraints,json=mfaConstraints,proto3" json:"mfa_constraints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *MFARequirement) Reset() {
*x = MFARequirement{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_logical_identity_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MFARequirement) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MFARequirement) ProtoMessage() {}
func (x *MFARequirement) ProtoReflect() protoreflect.Message {
mi := &file_sdk_logical_identity_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MFARequirement.ProtoReflect.Descriptor instead.
func (*MFARequirement) Descriptor() ([]byte, []int) {
return file_sdk_logical_identity_proto_rawDescGZIP(), []int{5}
}
func (x *MFARequirement) GetMFARequestID() string {
if x != nil {
return x.MFARequestID
}
return ""
}
func (x *MFARequirement) GetMFAConstraints() map[string]*MFAConstraintAny {
if x != nil {
return x.MFAConstraints
}
return nil
}
var File_sdk_logical_identity_proto protoreflect.FileDescriptor
var file_sdk_logical_identity_proto_rawDesc = []byte{
@ -372,10 +537,34 @@ var file_sdk_logical_identity_proto_rawDesc = []byte{
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
-0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
-0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f,
-0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f,
-0x74, 0x6f, 0x33,
+0x01, 0x22, 0x56, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44,
+0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73,
0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65,
0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x4d, 0x46, 0x41,
0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x12, 0x26, 0x0a,
0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67,
0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44,
0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71,
0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f,
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x54,
0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61,
0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74,
0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61,
0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74,
0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c,
0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72,
0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74,
0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -390,27 +579,34 @@ func file_sdk_logical_identity_proto_rawDescGZIP() []byte {
return file_sdk_logical_identity_proto_rawDescData
}
-var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_sdk_logical_identity_proto_goTypes = []interface{}{
(*Entity)(nil), // 0: logical.Entity
(*Alias)(nil), // 1: logical.Alias
(*Group)(nil), // 2: logical.Group
-nil, // 3: logical.Entity.MetadataEntry
-nil, // 4: logical.Alias.MetadataEntry
-nil, // 5: logical.Alias.CustomMetadataEntry
-nil, // 6: logical.Group.MetadataEntry
+(*MFAMethodID)(nil), // 3: logical.MFAMethodID
+(*MFAConstraintAny)(nil), // 4: logical.MFAConstraintAny
+(*MFARequirement)(nil), // 5: logical.MFARequirement
+nil, // 6: logical.Entity.MetadataEntry
+nil, // 7: logical.Alias.MetadataEntry
+nil, // 8: logical.Alias.CustomMetadataEntry
+nil, // 9: logical.Group.MetadataEntry
+nil, // 10: logical.MFARequirement.MFAConstraintsEntry
}
var file_sdk_logical_identity_proto_depIdxs = []int32{
1, // 0: logical.Entity.aliases:type_name -> logical.Alias
-3, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry
-4, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry
-5, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry
-6, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry
-5, // [5:5] is the sub-list for method output_type
-5, // [5:5] is the sub-list for method input_type
-5, // [5:5] is the sub-list for extension type_name
-5, // [5:5] is the sub-list for extension extendee
-0, // [0:5] is the sub-list for field type_name
+6, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry
+7, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry
+8, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry
+9, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry
+3, // 5: logical.MFAConstraintAny.any:type_name -> logical.MFAMethodID
+10, // 6: logical.MFARequirement.mfa_constraints:type_name -> logical.MFARequirement.MFAConstraintsEntry
+4, // 7: logical.MFARequirement.MFAConstraintsEntry.value:type_name -> logical.MFAConstraintAny
+8, // [8:8] is the sub-list for method output_type
+8, // [8:8] is the sub-list for method input_type
+8, // [8:8] is the sub-list for extension type_name
+8, // [8:8] is the sub-list for extension extendee
+0, // [0:8] is the sub-list for field type_name
}
func init() { file_sdk_logical_identity_proto_init() }
@ -455,6 +651,42 @@ func file_sdk_logical_identity_proto_init() {
return nil
}
}
file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MFAMethodID); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MFAConstraintAny); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MFARequirement); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
@ -462,7 +694,7 @@ func file_sdk_logical_identity_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_sdk_logical_identity_proto_rawDesc,
NumEnums: 0,
-NumMessages: 7,
+NumMessages: 11,
NumExtensions: 0,
NumServices: 0,
},


@ -73,4 +73,19 @@ message Group {
// NamespaceID is the identifier of the namespace to which this group
// belongs to.
string namespace_id = 4;
}
message MFAMethodID {
string type = 1;
string id = 2;
bool uses_passcode = 3;
}
message MFAConstraintAny {
repeated MFAMethodID any = 1;
}
message MFARequirement {
string mfa_request_id = 1;
map<string, MFAConstraintAny> mfa_constraints = 2;
}
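These messages surface login-MFA requirements to callers (see the new Auth.MFARequirement field earlier in this diff). A small sketch of walking a requirement; the IDs and method types are made up, and the assumed semantics are that any one method satisfies a constraint while every constraint must be satisfied.

package main

import (
    "fmt"

    "github.com/hashicorp/vault/sdk/logical"
)

func main() {
    // Hypothetical requirement, shaped like what a login response may carry
    // when login MFA is enforced.
    req := &logical.MFARequirement{
        MFARequestID: "3fc6c1f8-example",
        MFAConstraints: map[string]*logical.MFAConstraintAny{
            "enforcement-1": {
                Any: []*logical.MFAMethodID{
                    {Type: "totp", ID: "a1b2c3", UsesPasscode: true},
                    {Type: "duo", ID: "d4e5f6"},
                },
            },
        },
    }

    // List the methods that could satisfy each constraint.
    for name, constraint := range req.GetMFAConstraints() {
        for _, m := range constraint.GetAny() {
            fmt.Printf("constraint %q: method %s (%s), passcode=%v\n",
                name, m.GetID(), m.GetType(), m.GetUsesPasscode())
        }
    }
}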


@ -38,7 +38,6 @@ func (b BackendType) String() string {
// allows for a "procfs" like interaction, as internal state can be exposed by
// acting like a logical backend and being mounted.
type Backend interface {
// Initialize is used to initialize a plugin after it has been mounted.
Initialize(context.Context, *InitializationRequest) error
@ -113,18 +112,19 @@ type Factory func(context.Context, *BackendConfig) (Backend, error)
// Paths is the structure of special paths that is used for SpecialPaths.
type Paths struct {
-// Root are the paths that require a root token to access
+// Root are the API paths that require a root token to access
Root []string
-// Unauthenticated are the paths that can be accessed without any auth.
+// Unauthenticated are the API paths that can be accessed without any auth.
// These can't be regular expressions, it is either exact match, a prefix
// match and/or a wildcard match. For prefix match, append '*' as a suffix.
// For a wildcard match, use '+' in the segment to match any identifier
// (e.g. 'foo/+/bar'). Note that '+' can't be adjacent to a non-slash.
Unauthenticated []string
-// LocalStorage are paths (prefixes) that are local to this instance; this
-// indicates that these paths should not be replicated
+// LocalStorage are storage paths (prefixes) that are local to this cluster;
+// this indicates that these paths should not be replicated across performance clusters
+// (DR replication is unaffected).
LocalStorage []string
// SealWrapStorage are storage paths that, when using a capable seal,


@ -0,0 +1,84 @@
package logical
import (
"context"
"crypto"
"io"
)
type KeyUsage int
const (
KeyUsageEncrypt KeyUsage = 1 + iota
KeyUsageDecrypt
KeyUsageSign
KeyUsageVerify
KeyUsageWrap
KeyUsageUnwrap
)
type ManagedKey interface {
// Name is a human-readable identifier for a managed key that may be changed or renamed. Use UUID if a
// long-term consistent identifier is needed.
Name() string
// UUID is a unique identifier for a managed key that is guaranteed to remain
// consistent even if a key is migrated or renamed.
UUID() string
// Present returns true if the key is established in the KMS. This may return false if for example
// an HSM library is not configured on all cluster nodes.
Present(ctx context.Context) (bool, error)
// AllowsAll returns true if all the requested usages are supported by the managed key.
AllowsAll(usages []KeyUsage) bool
}
type (
ManagedKeyConsumer func(context.Context, ManagedKey) error
ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error
)
type ManagedKeySystemView interface {
// WithManagedKeyByName retrieves an instantiated managed key for consumption by the given function. The
// provided key can only be used within the scope of that function call
WithManagedKeyByName(ctx context.Context, keyName, mountPoint string, f ManagedKeyConsumer) error
// WithManagedKeyByUUID retrieves an instantiated managed key for consumption by the given function. The
// provided key can only be used within the scope of that function call
WithManagedKeyByUUID(ctx context.Context, keyUuid, mountPoint string, f ManagedKeyConsumer) error
// WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function,
// with the same semantics as WithManagedKeyByName
WithManagedSigningKeyByName(ctx context.Context, keyName, mountPoint string, f ManagedSigningKeyConsumer) error
// WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function,
// with the same semantics as WithManagedKeyByUUID
WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, mountPoint string, f ManagedSigningKeyConsumer) error
}
type ManagedAsymmetricKey interface {
ManagedKey
GetPublicKey(ctx context.Context) (crypto.PublicKey, error)
}
type ManagedKeyLifecycle interface {
// GenerateKey generates a key in the KMS if it didn't yet exist, returning the id.
// If it already existed, returns the existing id. KMSKey's key material is ignored if present.
GenerateKey(ctx context.Context) (string, error)
}
type ManagedSigningKey interface {
ManagedAsymmetricKey
// Sign returns a digital signature of the provided value. The SignerOpts param must provide the hash function
// that generated the value (if any).
// The optional randomSource specifies the source of random values and may be ignored by the implementation
// (such as on HSMs with their own internal RNG)
Sign(ctx context.Context, value []byte, randomSource io.Reader, opts crypto.SignerOpts) ([]byte, error)
// Verify verifies the provided signature against the value. The SignerOpts param must provide the hash function
// that generated the value (if any).
// If true is returned the signature is correct, false otherwise.
Verify(ctx context.Context, signature, value []byte, opts crypto.SignerOpts) (bool, error)
// GetSigner returns an implementation of crypto.Signer backed by the managed key. This should be called
// as needed so as to use per request contexts.
GetSigner(context.Context) (crypto.Signer, error)
}
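A sketch of how a backend holding a ManagedKeySystemView might sign a digest with a named managed key; the key name, mount point, and hash choice are placeholder assumptions.

package main

import (
    "context"
    "crypto"
    "crypto/rand"
    "crypto/sha256"
    "fmt"

    "github.com/hashicorp/vault/sdk/logical"
)

// signWithManagedKey signs a pre-hashed digest with a named managed key.
// "my-hsm-key" and "transit/" are placeholder values.
func signWithManagedKey(ctx context.Context, view logical.ManagedKeySystemView, digest []byte) ([]byte, error) {
    var sig []byte
    err := view.WithManagedSigningKeyByName(ctx, "my-hsm-key", "transit/",
        func(ctx context.Context, key logical.ManagedSigningKey) error {
            // Refuse keys that do not permit signing.
            if !key.AllowsAll([]logical.KeyUsage{logical.KeyUsageSign}) {
                return fmt.Errorf("key %s does not allow signing", key.Name())
            }
            var err error
            // The SignerOpts must name the hash that produced the digest.
            sig, err = key.Sign(ctx, digest, rand.Reader, crypto.SHA256)
            return err
        })
    return sig, err
}

func main() {
    digest := sha256.Sum256([]byte("payload"))
    fmt.Printf("would sign digest %x once a ManagedKeySystemView is available\n", digest[:4])
    _ = signWithManagedKey // wiring a real system view is out of scope for this sketch
}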


@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
-// protoc v3.17.3
+// protoc v3.19.3
// source: sdk/logical/plugin.proto
package logical


@ -220,6 +220,10 @@ type Request struct {
// this will be the sha256(sorted policies + namespace) associated with the
// client token.
ClientID string `json:"client_id" structs:"client_id" mapstructure:"client_id" sentinel:""`
+// InboundSSCToken is the token that arrives on an inbound request, supplied
+// by the vault user.
+InboundSSCToken string
}
// Clone returns a deep copy of the request by using copystructure
@ -377,3 +381,14 @@ type InitializationRequest struct {
// Storage can be used to durably store and retrieve state.
Storage Storage
}
type CustomHeader struct {
Name string
Value string
}
type CtxKeyInFlightRequestID struct{}
func (c CtxKeyInFlightRequestID) String() string {
return "in-flight-request-ID"
}
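CtxKeyInFlightRequestID is a typed context key; a minimal sketch of attaching and reading an in-flight request ID, with an arbitrary ID value.

package main

import (
    "context"
    "fmt"

    "github.com/hashicorp/vault/sdk/logical"
)

func main() {
    // Attach an in-flight request ID under the typed key; using a struct type
    // as the key avoids collisions with keys set by other packages.
    ctx := context.WithValue(context.Background(), logical.CtxKeyInFlightRequestID{}, "req-0001")

    // Read it back; the key's String method prints "in-flight-request-ID".
    if id, ok := ctx.Value(logical.CtxKeyInFlightRequestID{}).(string); ok {
        fmt.Printf("handling %s (%s)\n", id, logical.CtxKeyInFlightRequestID{})
    }
}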


@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
+"strconv"
"sync/atomic"
"github.com/hashicorp/vault/sdk/helper/wrapping"
@ -209,13 +210,103 @@ func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter {
}
// Write will write the bytes to the underlying io.Writer.
-func (rw *HTTPResponseWriter) Write(bytes []byte) (int, error) {
-atomic.StoreUint32(rw.written, 1)
-return rw.ResponseWriter.Write(bytes)
+func (w *HTTPResponseWriter) Write(bytes []byte) (int, error) {
+atomic.StoreUint32(w.written, 1)
+return w.ResponseWriter.Write(bytes)
}
// Written tells us if the writer has been written to yet.
-func (rw *HTTPResponseWriter) Written() bool {
-return atomic.LoadUint32(rw.written) == 1
+func (w *HTTPResponseWriter) Written() bool {
+return atomic.LoadUint32(w.written) == 1
}
type WrappingResponseWriter interface {
http.ResponseWriter
Wrapped() http.ResponseWriter
}
type StatusHeaderResponseWriter struct {
wrapped http.ResponseWriter
wroteHeader bool
StatusCode int
headers map[string][]*CustomHeader
}
func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*CustomHeader) *StatusHeaderResponseWriter {
return &StatusHeaderResponseWriter{
wrapped: w,
wroteHeader: false,
StatusCode: 200,
headers: h,
}
}
func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter {
return w.wrapped
}
func (w *StatusHeaderResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *StatusHeaderResponseWriter) Write(buf []byte) (int, error) {
// It is allowed to only call ResponseWriter.Write and skip
// ResponseWriter.WriteHeader. An example of such a situation is
// "handleUIStub". The Write function will internally set the status code
// 200 for the response, and that call might invoke other
// implementations of the WriteHeader function. So we still need to set
// the custom headers here. In cases where both WriteHeader and Write of
// StatusHeaderResponseWriter are called, the internal call to WriteHeader
// made from inside Write won't change the headers a second time.
if !w.wroteHeader {
w.setCustomResponseHeaders(w.StatusCode)
}
return w.wrapped.Write(buf)
}
func (w *StatusHeaderResponseWriter) WriteHeader(statusCode int) {
w.setCustomResponseHeaders(statusCode)
w.wrapped.WriteHeader(statusCode)
w.StatusCode = statusCode
// in cases where Write is called after WriteHeader, let's prevent setting
// ResponseWriter headers twice
w.wroteHeader = true
}
func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) {
sch := w.headers
if sch == nil {
return
}
// Checking the validity of the status code
if status >= 600 || status < 100 {
return
}
// setter function to set the headers
setter := func(hvl []*CustomHeader) {
for _, hv := range hvl {
w.Header().Set(hv.Name, hv.Value)
}
}
// Setting the default headers first
setter(sch["default"])
// setting the Xyy pattern first
d := fmt.Sprintf("%vxx", status/100)
if val, ok := sch[d]; ok {
setter(val)
}
// Setting the specific headers
if val, ok := sch[strconv.Itoa(status)]; ok {
setter(val)
}
return
}
var _ WrappingResponseWriter = &StatusHeaderResponseWriter{}
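A sketch of wrapping an http.Handler with the new StatusHeaderResponseWriter so that custom headers keyed by "default", a status family such as "4xx", or an exact code are applied; the header names, values, and listen address are placeholders.

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/hashicorp/vault/sdk/logical"
)

func main() {
    // Custom headers keyed by "default", a status family, or an exact code.
    custom := map[string][]*logical.CustomHeader{
        "default": {{Name: "X-Example-Default", Value: "1"}},
        "4xx":     {{Name: "X-Example-Client-Error", Value: "true"}},
        "404":     {{Name: "X-Example-Not-Found", Value: "true"}},
    }

    mux := http.NewServeMux()
    mux.HandleFunc("/missing", func(w http.ResponseWriter, r *http.Request) {
        http.Error(w, "nothing here", http.StatusNotFound)
    })

    wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // Every response written through sw picks up the matching headers.
        sw := logical.NewStatusHeaderResponseWriter(w, custom)
        mux.ServeHTTP(sw, r)
        fmt.Println("responded with status", sw.StatusCode)
    })

    log.Fatal(http.ListenAndServe("127.0.0.1:8080", wrapped))
}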


@ -17,7 +17,7 @@ import (
func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
if err == nil && (resp == nil || !resp.IsError()) {
switch {
-case req.Operation == ReadOperation, req.Operation == PatchOperation:
+case req.Operation == ReadOperation:
if resp == nil {
return http.StatusNotFound, nil
}


@ -56,6 +56,10 @@ type SystemView interface {
// name. Returns a PluginRunner or an error if a plugin can not be found.
LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error)
+// NewPluginClient returns a client for managing the lifecycle of plugin
+// processes
+NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error)
// MlockEnabled returns the configuration setting for enabling mlock on
// plugins.
MlockEnabled() bool
@ -152,6 +156,10 @@ func (d StaticSystemView) ReplicationState() consts.ReplicationState {
return d.ReplicationStateVal
}
+func (d StaticSystemView) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) {
+return nil, errors.New("NewPluginClient is not implemented in StaticSystemView")
+}
func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
}


@ -1,7 +1,11 @@
package logical
import (
+"crypto/sha256"
+"encoding/base64"
"fmt"
+"sort"
+"strings"
"time"
sockaddr "github.com/hashicorp/go-sockaddr"
@ -20,13 +24,24 @@ const (
// TokenTypeBatch is a batch token
TokenTypeBatch
-// TokenTypeDefaultService, configured on a mount, means that if
+// TokenTypeDefaultService configured on a mount, means that if
// TokenTypeDefault is sent back by the mount, create Service tokens
TokenTypeDefaultService
-// TokenTypeDefaultBatch, configured on a mount, means that if
+// TokenTypeDefaultBatch configured on a mount, means that if
// TokenTypeDefault is sent back by the mount, create Batch tokens
TokenTypeDefaultBatch
+// ClientIDTWEDelimiter Delimiter between the string fields used to generate a client
+// ID for tokens without entities. This is the 0 character, which
+// is a non-printable string. Please see unicode.IsPrint for details.
+ClientIDTWEDelimiter = rune('\x00')
+// SortedPoliciesTWEDelimiter Delimiter between each policy in the sorted policies used to
+// generate a client ID for tokens without entities. This is the 127
+// character, which is a non-printable string. Please see unicode.IsPrint
+// for details.
+SortedPoliciesTWEDelimiter = rune('\x7F')
)
func (t *TokenType) UnmarshalJSON(b []byte) error {
@ -78,6 +93,10 @@ type TokenEntry struct {
// ID of this entry, generally a random UUID
ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""`
+// ExternalID is the ID of a newly created service
+// token that will be returned to a user
+ExternalID string `json:"-"`
// Accessor for this token, a random UUID
Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""`
@ -154,6 +173,46 @@ type TokenEntry struct {
CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
}
// CreateClientID returns the client ID, and a boolean which is false if the token
// has an associated entity, and true otherwise
func (te *TokenEntry) CreateClientID() (string, bool) {
var clientIDInputBuilder strings.Builder
// if entry has an associated entity ID, return it
if te.EntityID != "" {
return te.EntityID, false
}
// The entry is associated with a TWE (token without entity). In this case
// we must create a client ID by calculating the following formula:
// clientID = SHA256(sorted policies + namespace)
// Step 1: Copy entry policies to a new struct
sortedPolicies := make([]string, len(te.Policies))
copy(sortedPolicies, te.Policies)
// Step 2: Sort and join copied policies
sort.Strings(sortedPolicies)
for _, pol := range sortedPolicies {
clientIDInputBuilder.WriteRune(SortedPoliciesTWEDelimiter)
clientIDInputBuilder.WriteString(pol)
}
// Step 3: Add namespace ID
clientIDInputBuilder.WriteRune(ClientIDTWEDelimiter)
clientIDInputBuilder.WriteString(te.NamespaceID)
if clientIDInputBuilder.Len() == 0 {
return "", true
}
// Step 4: Remove the first character in the string, as it's an unnecessary delimiter
clientIDInput := clientIDInputBuilder.String()[1:]
// Step 5: Hash the sum
hashed := sha256.Sum256([]byte(clientIDInput))
return base64.StdEncoding.EncodeToString(hashed[:]), true
}
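A short sketch of the two CreateClientID branches: a token with an entity simply yields the entity ID, while a token without an entity (TWE) yields a base64-encoded SHA-256 over the sorted policies and namespace ID, so policy order does not matter; all values below are made up.

package main

import (
    "fmt"

    "github.com/hashicorp/vault/sdk/logical"
)

func main() {
    // Token tied to an entity: the entity ID is the client ID.
    withEntity := &logical.TokenEntry{EntityID: "entity-1234"}
    id, isTWE := withEntity.CreateClientID()
    fmt.Println(id, isTWE) // "entity-1234 false"

    // Token without an entity (TWE): the client ID is derived from the
    // sorted policies and the namespace ID, so policy order does not matter.
    a := &logical.TokenEntry{Policies: []string{"default", "dev"}, NamespaceID: "root"}
    b := &logical.TokenEntry{Policies: []string{"dev", "default"}, NamespaceID: "root"}
    idA, _ := a.CreateClientID()
    idB, _ := b.CreateClientID()
    fmt.Println(idA == idB, idA) // "true <base64 SHA-256 digest>"
}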
func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
if te == nil {
return nil, nil


@ -39,6 +39,8 @@ func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
EntityID: input.Auth.EntityID,
TokenType: input.Auth.TokenType.String(),
Orphan: input.Auth.Orphan,
+MFARequirement: input.Auth.MFARequirement,
+NumUses: input.Auth.NumUses,
}
}
@ -108,6 +110,8 @@ type HTTPAuth struct {
EntityID string `json:"entity_id"`
TokenType string `json:"token_type"`
Orphan bool `json:"orphan"`
+MFARequirement *MFARequirement `json:"mfa_requirement"`
+NumUses int `json:"num_uses"`
}
type HTTPWrapInfo struct {
@ -134,8 +138,8 @@ func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
}
// Marshaling a response will always be a JSON object, meaning it will
// always start with '{', so we hijack this to prepend necessary values
-// Make a guess at the capacity, and write the object opener
-buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
+var buf bytes.Buffer
buf.WriteRune('{')
for k, v := range h.Response.Data {
// Marshal each key/value individually


@ -184,7 +184,7 @@ func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
return nil, err
}
-// Cache the result
+// Cache the result, even if nil
c.lru.Add(key, ent)
return ent, nil


@ -21,6 +21,7 @@ const (
const (
ErrValueTooLarge = "put failed due to value being too large"
+ErrKeyTooLarge = "put failed due to key being too large"
)
// Backend is the interface required for a physical


@ -1,4 +1,4 @@
-// +build cgo
+//go:build cgo
package version


@ -7,10 +7,10 @@ import (
// VersionInfo
type VersionInfo struct {
-Revision string
-Version string
-VersionPrerelease string
-VersionMetadata string
+Revision string `json:"revision,omitempty"`
+Version string `json:"version,omitempty"`
+VersionPrerelease string `json:"version_prerelease,omitempty"`
+VersionMetadata string `json:"version_metadata,omitempty"`
}
func GetVersion() *VersionInfo {
@ -37,7 +37,7 @@ func (c *VersionInfo) VersionNumber() string {
return "(version unknown)"
}
-version := fmt.Sprintf("%s", c.Version)
+version := c.Version
if c.VersionPrerelease != "" {
version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)


@ -8,7 +8,7 @@ var (
// Whether cgo is enabled or not; set at build time
CgoEnabled bool
-Version = "1.9.0"
-VersionPrerelease = "dev"
+Version = "1.10.0"
+VersionPrerelease = "dev1"
VersionMetadata = ""
)

4
vendor/modules.txt vendored

@ -242,10 +242,10 @@ github.com/hashicorp/hcl/json/token
## explicit; go 1.13
github.com/hashicorp/vault/command/agent/auth
github.com/hashicorp/vault/command/agent/auth/kubernetes
-# github.com/hashicorp/vault/api v1.3.1
+# github.com/hashicorp/vault/api v1.4.1
## explicit; go 1.13
github.com/hashicorp/vault/api
-# github.com/hashicorp/vault/sdk v0.3.0
+# github.com/hashicorp/vault/sdk v0.4.1
## explicit; go 1.16
github.com/hashicorp/vault/sdk/helper/certutil
github.com/hashicorp/vault/sdk/helper/compressutil