Merge pull request #143 from ceph/devel

Sync downstream devel with upstream devel
OpenShift Merge Robot 2022-10-31 03:38:34 -04:00 committed by GitHub
commit 8abfa19ea5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
366 changed files with 29276 additions and 4190 deletions

View File

@ -0,0 +1,93 @@
---
name: Add comment
# yamllint disable-line rule:truthy
on:
  pull_request_target:
    branches:
      - devel
      - "release-v*"
    types:
      - labeled
jobs:
  add-comment:
    if: github.event.label.name == 'ok-to-test'
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - name: Add comment to trigger external storage tests for Kubernetes 1.23
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/k8s-e2e-external-storage/1.23
      - name: Add comment to trigger external storage tests for Kubernetes 1.24
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/k8s-e2e-external-storage/1.24
      - name: Add comment to trigger external storage tests for Kubernetes 1.25
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/k8s-e2e-external-storage/1.25
      - name: Add comment to trigger helm E2E tests for Kubernetes 1.23
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e-helm/k8s-1.23
      - name: Add comment to trigger helm E2E tests for Kubernetes 1.24
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e-helm/k8s-1.24
      - name: Add comment to trigger helm E2E tests for Kubernetes 1.25
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e-helm/k8s-1.25
      - name: Add comment to trigger E2E tests for Kubernetes 1.23
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e/k8s-1.23
      - name: Add comment to trigger E2E tests for Kubernetes 1.24
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e/k8s-1.24
      - name: Add comment to trigger E2E tests for Kubernetes 1.25
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/mini-e2e/k8s-1.25
      - name: Add comment to trigger cephfs upgrade tests
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/upgrade-tests-cephfs
      - name: Add comment to trigger rbd upgrade tests
        uses: peter-evans/create-or-update-comment@v2
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            /test ci/centos/upgrade-tests-rbd

View File

@ -26,15 +26,15 @@ queue_rules:
- "status-success=golangci-lint" - "status-success=golangci-lint"
- "status-success=mod-check" - "status-success=mod-check"
- "status-success=lint-extras" - "status-success=lint-extras"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23" - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/k8s-e2e-external-storage/1.24" - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24" - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.22" - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
- "status-success=ci/centos/mini-e2e/k8s-1.23" - "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.24" - "status-success=ci/centos/mini-e2e/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.25"
- "status-success=ci/centos/upgrade-tests-cephfs" - "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd" - "status-success=ci/centos/upgrade-tests-rbd"
- and: - and:
@ -75,15 +75,15 @@ pull_request_rules:
- "status-success=golangci-lint" - "status-success=golangci-lint"
- "status-success=mod-check" - "status-success=mod-check"
- "status-success=lint-extras" - "status-success=lint-extras"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23" - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/k8s-e2e-external-storage/1.24" - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24" - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.22" - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
- "status-success=ci/centos/mini-e2e/k8s-1.23" - "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.24" - "status-success=ci/centos/mini-e2e/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.25"
- "status-success=ci/centos/upgrade-tests-cephfs" - "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd" - "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO" - "status-success=DCO"
@ -114,15 +114,15 @@ pull_request_rules:
- "status-success=commitlint" - "status-success=commitlint"
- "status-success=mod-check" - "status-success=mod-check"
- "status-success=lint-extras" - "status-success=lint-extras"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23" - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/k8s-e2e-external-storage/1.24" - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24" - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.22" - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
- "status-success=ci/centos/mini-e2e/k8s-1.23" - "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.24" - "status-success=ci/centos/mini-e2e/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.25"
- "status-success=ci/centos/upgrade-tests-cephfs" - "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd" - "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO" - "status-success=DCO"
@ -145,15 +145,15 @@ pull_request_rules:
- "status-success=mod-check" - "status-success=mod-check"
- "status-success=lint-extras" - "status-success=lint-extras"
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23" - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/k8s-e2e-external-storage/1.24" - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24" - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.22" - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
- "status-success=ci/centos/mini-e2e/k8s-1.23" - "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.24" - "status-success=ci/centos/mini-e2e/k8s-1.24"
- "status-success=ci/centos/mini-e2e/k8s-1.25"
- "status-success=ci/centos/upgrade-tests-cephfs" - "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd" - "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO" - "status-success=DCO"

View File

@ -56,6 +56,7 @@ environments.
| Ceph CSI Version | Container Orchestrator Name | Version Tested|
| -----------------| --------------------------- | --------------|
+| v3.7.2 | Kubernetes | v1.22, v1.23, v1.24|
| v3.7.1 | Kubernetes | v1.22, v1.23, v1.24|
| v3.7.0 | Kubernetes | v1.22, v1.23, v1.24|
| v3.6.1 | Kubernetes | v1.21, v1.22, v1.23|
@ -130,6 +131,7 @@ in the Kubernetes documentation.
| Ceph CSI Release/Branch | Container image name | Image Tag |
| ----------------------- | ---------------------------- | --------- |
| devel (Branch) | quay.io/cephcsi/cephcsi | canary |
+| v3.7.2 (Release) | quay.io/cephcsi/cephcsi | v3.7.2 |
| v3.7.1 (Release) | quay.io/cephcsi/cephcsi | v3.7.1 |
| v3.7.0 (Release) | quay.io/cephcsi/cephcsi | v3.7.0 |
| v3.6.1 (Release) | quay.io/cephcsi/cephcsi | v3.6.1 |

View File

@ -43,7 +43,7 @@ VM_DRIVER=none
CHANGE_MINIKUBE_NONE_USER=true
# Rook options
-ROOK_VERSION=v1.9.8
+ROOK_VERSION=v1.10.4
# Provide ceph image path
ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v17

View File

@ -105,12 +105,12 @@ compatibility support and without prior notice.
**Also, we do not recommend any direct upgrades to 3.7 except from 3.6 to 3.7.**
For example, upgrading from 3.5 to 3.7 is not recommended.
-git checkout v3.7.1 tag
+git checkout v3.7.2 tag
```bash
git clone https://github.com/ceph/ceph-csi.git
cd ./ceph-csi
-git checkout v3.7.1
+git checkout v3.7.2
```
```console

View File

@ -1,45 +0,0 @@
# ceph-fuse: detection of corrupted mounts and their recovery
Mounts managed by ceph-fuse may get corrupted by e.g. the ceph-fuse process
exiting abruptly, or its parent Node Plugin container being terminated, taking
down its child processes with it.
This may manifest in concerned workloads like so:
```
# mount | grep fuse
ceph-fuse on /cephfs-share type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
# ls /cephfs-share
ls: /cephfs-share: Socket not connected
```
or,
```
# stat /home/kubelet/pods/ae344b80-3b07-4589-b1a1-ca75fa9debf2/volumes/kubernetes.io~csi/pvc-ec69de59-7823-4840-8eee-544f8261fef0/mount: transport endpoint is not connected
```
This feature allows CSI CephFS plugin to be able to detect if a ceph-fuse mount
is corrupted during the volume publishing phase, and will attempt to recover it
for the newly scheduled pod. Pods that already reside on a node whose
ceph-fuse mountpoints were broken may still need to be restarted, however.
## Detection
A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
following errors:
* `ENOTCONN`
* `ESTALE`
* `EIO`
* `EACCES`
* `EHOSTDOWN`
## Recovery
Once a mountpoint corruption is detected, its recovery is performed by
remounting the volume associated with it.
Recovery is attempted only if `/csi/mountinfo` directory is made available to
CSI CephFS plugin (available by default in the Helm chart and Kubernetes
manifests).

View File

@ -0,0 +1,89 @@
# Ceph mount corruption detection and recovery
## ceph-fuse: detection of corrupted mounts and their recovery
Mounts managed by ceph-fuse may get corrupted by e.g. the ceph-fuse process
exiting abruptly, or its parent Node Plugin container being terminated, taking
down its child processes with it.
This may manifest in affected workloads like so:
```
# mount | grep fuse
ceph-fuse on /cephfs-share type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
# ls /cephfs-share
ls: /cephfs-share: Socket not connected
```
or,
```
# stat /home/kubelet/pods/ae344b80-3b07-4589-b1a1-ca75fa9debf2/volumes/kubernetes.io~csi/pvc-ec69de59-7823-4840-8eee-544f8261fef0/mount: transport endpoint is not connected
```
This feature allows the CSI CephFS plugin to detect whether a ceph-fuse mount
is corrupted during the volume publishing phase and to attempt to recover it
for the newly scheduled pod. Pods that already reside on a node whose
ceph-fuse mountpoints were broken may still need to be restarted, however.
### ceph-fuse corruption detection
A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
following errors:
* `ENOTCONN`
* `ESTALE`
* `EIO`
* `EACCES`
* `EHOSTDOWN`
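The check itself is a plain `stat()` whose errno is compared against that list. A minimal, hypothetical Go sketch of the rule (not the plugin's actual helper; the function name and the path used in `main` are made up for illustration):

```go
// Hypothetical sketch of the corruption check described above.
package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isCorruptedMount stat()s the mountpoint and reports whether the failure is
// one of the errno values that indicate a broken CephFS mount.
func isCorruptedMount(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return false
	}
	for _, errno := range []error{
		unix.ENOTCONN, unix.ESTALE, unix.EIO, unix.EACCES, unix.EHOSTDOWN,
	} {
		if errors.Is(err, errno) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCorruptedMount("/cephfs-share"))
}
```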
### ceph-fuse recovery
Once a mountpoint corruption is detected, its recovery is performed by
remounting the volume associated with it.
Recovery is attempted only if the `/csi/mountinfo` directory is made available
to the CSI CephFS plugin (it is available by default in the Helm chart and
Kubernetes manifests).
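For illustration only, a rough sketch of what "recovery by remounting" amounts to. `remountFuse` is a hypothetical helper, and the staging path and monitor address in `main` are placeholders; the real plugin derives the mount arguments (client ID, keyring, filesystem name) from the volume context and secrets:

```go
// Rough sketch of "recovery by remounting" for a corrupted ceph-fuse mount.
package main

import (
	"errors"
	"fmt"
	"os/exec"

	"golang.org/x/sys/unix"
)

func remountFuse(stagingPath, monitors string) error {
	// Lazily detach the corrupted mountpoint; EINVAL simply means the path
	// was no longer mounted.
	err := unix.Unmount(stagingPath, unix.MNT_DETACH)
	if err != nil && !errors.Is(err, unix.EINVAL) {
		return fmt.Errorf("failed to unmount %s: %w", stagingPath, err)
	}
	// Run ceph-fuse again for the same volume; the real invocation also
	// carries the client ID, keyring and filesystem name.
	out, err := exec.Command("ceph-fuse", "-m", monitors, stagingPath).CombinedOutput()
	if err != nil {
		return fmt.Errorf("ceph-fuse remount failed: %w, output: %s", err, out)
	}
	return nil
}

func main() {
	// Placeholder staging path and monitor address.
	err := remountFuse("/var/lib/kubelet/plugins/cephfs.csi.ceph.com/staging/pvc-example", "10.102.104.172:6789")
	if err != nil {
		fmt.Println(err)
	}
}
```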
## kernel client: detection of corrupted mounts and their recovery
Mounts managed by the kernel CephFS client may get corrupted if, for example,
the network connection is disrupted for long enough that the client is
forcibly disconnected from the system. More details can be found
[here](https://docs.ceph.com/en/quincy/cephfs/troubleshooting/#disconnected-remounted-fs).
The above case may manifest in affected workloads like so:
```
# mount | grep ceph
10.102.104.172:6789:/volumes/csi/csi-vol-7fed1ce7-97cf-43ef-9b84-2a49ab992515/d61be75e-74ae-428c-a5d1-48f79d1d3c8c on /var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com/bc0146ec2b5d9a9db62e698abbe0adcae19c0e01f5cf15d3d593ed33c7bc1a8d/globalmount type ceph (rw,relatime,name=csi-cephfs-node,secret=<hidden>,fsid=00000000-0000-0000-0000-000000000000,acl,mds_namespace=myfs,_netdev)
10.102.104.172:6789:/volumes/csi/csi-vol-7fed1ce7-97cf-43ef-9b84-2a49ab992515/d61be75e-74ae-428c-a5d1-48f79d1d3c8c on /var/lib/kubelet/pods/8087df68-9756-4f38-86ef-6c81e1075607/volumes/kubernetes.io~csi/pvc-15e63d0a-77de-4886-8d0f-516f9fecbeb4/mount type ceph (rw,relatime,name=csi-cephfs-node,secret=<hidden>,fsid=00000000-0000-0000-0000-000000000000,acl,mds_namespace=myfs,_netdev)
sh-4.4# ls /var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com/bc0146ec2b5d9a9db62e698abbe0adcae19c0e01f5cf15d3d593ed33c7bc1a8d/globalmount
ls: cannot access '/var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com/bc0146ec2b5d9a9db62e698abbe0adcae19c0e01f5cf15d3d593ed33c7bc1a8d/globalmount': Permission denied
```
### kernel client corruption detection
A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
following errors:
* `ENOTCONN`
* `ESTALE`
* `EIO`
* `EACCES`
* `EHOSTDOWN`
More details about the error codes can be found
[here](https://www.gnu.org/software/libc/manual/html_node/Error-Codes.html).
For such mounts, the CephCSI node plugin reports `volume_condition` as
abnormal in the `NodeGetVolumeStats` RPC call.
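As a hedged sketch of how that abnormal condition can be surfaced, the CSI spec's `VolumeCondition` message can be filled in the `NodeGetVolumeStats` reply. The helper below is illustrative only and is not ceph-csi's actual implementation:

```go
// Illustrative sketch of reporting an abnormal volume condition through the
// CSI NodeGetVolumeStats response.
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// volumeStatsResponse marks volume_condition abnormal when the staged path
// was detected as corrupted by the stat() check described above.
func volumeStatsResponse(stagedPath string, corrupted bool) *csi.NodeGetVolumeStatsResponse {
	condition := &csi.VolumeCondition{
		Abnormal: false,
		Message:  "volume is in a healthy condition",
	}
	if corrupted {
		condition.Abnormal = true
		condition.Message = fmt.Sprintf("mountpoint %s is corrupted", stagedPath)
	}
	return &csi.NodeGetVolumeStatsResponse{VolumeCondition: condition}
}

func main() {
	// Placeholder staged path.
	fmt.Println(volumeStatsResponse("/var/lib/kubelet/plugins/example/globalmount", true))
}
```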
### kernel client recovery
Once mountpoint corruption is detected, there are two ways to recover from it:
* Reboot the node where the abnormal volume behavior is observed.
* Scale down all the applications using the CephFS PVC on the node where the
  abnormal mounts are present. Once all the application pods are deleted, scale
  the applications back up so the CephFS PVC is remounted into the application
  pods (a minimal sketch of this follows the list).
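A minimal client-go sketch of the second method, assuming a Deployment named `my-app` in the `default` namespace (both placeholders) and a kubeconfig in the default location:

```go
// Sketch of the scale-down / scale-up recovery method using client-go.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// rescale sets the replica count of a Deployment; waiting for the pods to
// terminate is left out of the sketch.
func rescale(c kubernetes.Interface, namespace, name string, replicas int32) error {
	scale, err := c.AppsV1().Deployments(namespace).GetScale(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.AppsV1().Deployments(namespace).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{})
	return err
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Scale to zero so the pods holding the corrupted mount are deleted ...
	if err := rescale(client, "default", "my-app", 0); err != nil {
		panic(err)
	}
	// ... wait until the old pods are gone, then scale back up so the CephFS
	// PVC is mounted afresh into the new pods.
	if err := rescale(client, "default", "my-app", 1); err != nil {
		panic(err)
	}
	fmt.Println("application rescaled; CephFS PVC remounted on the new pods")
}
```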

View File

@ -252,6 +252,7 @@ A few labels interact with automation around the pull requests:
* DNM: DO NOT MERGE (Mergify will not merge this PR)
* ci/skip/e2e: skip running e2e CI jobs
* ci/skip/multi-arch-build: skip building container images for different architectures
+* ok-to-test: PR is ready for e2e testing.
**Review Process:**
Once your PR has been submitted for review the following criteria will
@ -268,6 +269,9 @@ need to be met before it will be merged:
  community feedback.
* The 24 working hours counts hours occurring Mon-Fri in the local timezone
  of the submitter.
+* ceph-csi-maintainers/ceph-csi-contributors can add the `ok-to-test` label to a
+  pull request when they think it is ready for e2e testing. This is done to
+  avoid unnecessary load on the CI.
* Each PR must be fully updated to devel and tests must have passed
* If the PR is having trivial changes or the reviewer is confident enough that
  PR doesn't need a second review, the reviewer can set `ready-to-merge` label

View File

@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/pod-security-admission/api"
) )
var ( var (
@ -146,6 +147,7 @@ func validateSubvolumePath(f *framework.Framework, pvcName, pvcNamespace, fileSy
var _ = Describe(cephfsType, func() { var _ = Describe(cephfsType, func() {
f := framework.NewDefaultFramework(cephfsType) f := framework.NewDefaultFramework(cephfsType)
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
var c clientset.Interface var c clientset.Interface
// deploy CephFS CSI // deploy CephFS CSI
BeforeEach(func() { BeforeEach(func() {

View File

@ -32,6 +32,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/pod-security-admission/api"
) )
var ( var (
@ -236,6 +237,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
var _ = Describe("nfs", func() { var _ = Describe("nfs", func() {
f := framework.NewDefaultFramework("nfs") f := framework.NewDefaultFramework("nfs")
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
var c clientset.Interface var c clientset.Interface
// deploy CephFS CSI // deploy CephFS CSI
BeforeEach(func() { BeforeEach(func() {

View File

@ -33,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/pod-security-admission/api"
) )
var ( var (
@ -252,6 +253,7 @@ func ByFileAndBlockEncryption(
var _ = Describe("RBD", func() { var _ = Describe("RBD", func() {
f := framework.NewDefaultFramework(rbdType) f := framework.NewDefaultFramework(rbdType)
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
var c clientset.Interface var c clientset.Interface
var kernelRelease string var kernelRelease string
// deploy RBD CSI // deploy RBD CSI
@ -511,6 +513,7 @@ var _ = Describe("RBD", func() {
}) })
By("reattach the old PV to a new PVC and check if PVC metadata is updated on RBD image", func() { By("reattach the old PV to a new PVC and check if PVC metadata is updated on RBD image", func() {
reattachPVCNamespace := fmt.Sprintf("%s-2", f.Namespace.Name)
pvc, err := loadPVC(pvcPath) pvc, err := loadPVC(pvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) e2elog.Failf("failed to load PVC: %v", err)
@ -591,8 +594,15 @@ var _ = Describe("RBD", func() {
// validate created backend rbd images // validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool) validateRBDImageCount(f, 1, defaultRBDPool)
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType) validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
// create namespace for reattach PVC, deletion will be taken care by framework
ns, err := f.CreateNamespace(reattachPVCNamespace, nil)
if err != nil {
e2elog.Failf("failed to create namespace: %v", err)
}
pvcObj.Name = "rbd-pvc-new" pvcObj.Name = "rbd-pvc-new"
pvcObj.Namespace = ns.Name
// unset the resource version as should not be set on objects to be created // unset the resource version as should not be set on objects to be created
pvcObj.ResourceVersion = "" pvcObj.ResourceVersion = ""
err = createPVCAndvalidatePV(f.ClientSet, pvcObj, deployTimeout) err = createPVCAndvalidatePV(f.ClientSet, pvcObj, deployTimeout)
@ -617,6 +627,19 @@ var _ = Describe("RBD", func() {
e2elog.Failf("expected pvcName %q got %q", pvcObj.Name, pvcName) e2elog.Failf("expected pvcName %q got %q", pvcObj.Name, pvcName)
} }
owner, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get owner name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey, err, stdErr)
}
owner = strings.TrimSuffix(owner, "\n")
if owner != pvcObj.Namespace {
e2elog.Failf("expected pvcNamespace name %q got %q", pvcObj.Namespace, owner)
}
patchBytes = []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Delete"}}`) patchBytes = []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Delete"}}`)
_, err = c.CoreV1().PersistentVolumes().Patch( _, err = c.CoreV1().PersistentVolumes().Patch(
context.TODO(), context.TODO(),

View File

@ -30,10 +30,12 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/pod-security-admission/api"
) )
var _ = Describe("CephFS Upgrade Testing", func() { var _ = Describe("CephFS Upgrade Testing", func() {
f := framework.NewDefaultFramework("upgrade-test-cephfs") f := framework.NewDefaultFramework("upgrade-test-cephfs")
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
var ( var (
c clientset.Interface c clientset.Interface
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim

View File

@ -30,10 +30,12 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/pod-security-admission/api"
) )
var _ = Describe("RBD Upgrade Testing", func() { var _ = Describe("RBD Upgrade Testing", func() {
f := framework.NewDefaultFramework("upgrade-test-rbd") f := framework.NewDefaultFramework("upgrade-test-rbd")
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
var ( var (
// cwd stores the initial working directory. // cwd stores the initial working directory.
cwd string cwd string

View File

@ -215,11 +215,14 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
    filterLessCmds := []string{cmds.radosLsCmd, cmds.radosLsKeysCmd}
    for i, cmd := range filterCmds {
        stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
-       if err != nil || stdErr != "" {
+       if err != nil {
            if !strings.Contains(err.Error(), exitOneErr) {
                e2elog.Failf("failed to execute rados command '%s' : err=%v stdErr=%s", cmd, err, stdErr)
            }
        }
+       if stdErr != "" {
+           e2elog.Failf("failed to execute rados command '%s' : stdErr=%s", cmd, stdErr)
+       }
        err = compareStdoutWithCount(stdOut, count)
        if err == nil {
            continue

go.mod (42 changes)
View File

@ -4,32 +4,32 @@ go 1.17
require ( require (
github.com/IBM/keyprotect-go-client v0.8.1 github.com/IBM/keyprotect-go-client v0.8.1
github.com/aws/aws-sdk-go v1.44.96 github.com/aws/aws-sdk-go v1.44.122
github.com/aws/aws-sdk-go-v2/service/sts v1.16.17 github.com/aws/aws-sdk-go-v2/service/sts v1.17.1
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
// TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag // TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
github.com/ceph/go-ceph v0.17.0 github.com/ceph/go-ceph v0.17.0
github.com/container-storage-interface/spec v1.6.0 github.com/container-storage-interface/spec v1.6.0
github.com/csi-addons/replication-lib-utils v0.2.0 github.com/csi-addons/replication-lib-utils v0.2.0
github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900 github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900
github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/gemalto/kmip-go v0.0.8
github.com/golang/protobuf v1.5.2 github.com/golang/protobuf v1.5.2
github.com/google/fscrypt v0.3.3 github.com/google/fscrypt v0.3.3
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.7.2 github.com/hashicorp/vault/api v1.8.1
github.com/kubernetes-csi/csi-lib-utils v0.11.0 github.com/kubernetes-csi/csi-lib-utils v0.11.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.1 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.1
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo/v2 v2.1.6 github.com/onsi/ginkgo/v2 v2.4.0
github.com/onsi/gomega v1.20.1 github.com/onsi/gomega v1.22.1
github.com/pkg/xattr v0.4.7 github.com/pkg/xattr v0.4.7
github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_golang v1.12.2
github.com/stretchr/testify v1.8.0 github.com/stretchr/testify v1.8.1
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
golang.org/x/net v0.0.0-20220722155237-a158d28d115b golang.org/x/net v0.1.0
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f golang.org/x/sys v0.1.0
google.golang.org/grpc v1.49.0 google.golang.org/grpc v1.49.0
google.golang.org/protobuf v1.28.0 google.golang.org/protobuf v1.28.0
k8s.io/api v0.25.0 k8s.io/api v0.25.0
@ -53,11 +53,11 @@ require (
github.com/ansel1/merry/v2 v2.0.1 // indirect github.com/ansel1/merry/v2 v2.0.1 // indirect
github.com/armon/go-metrics v0.3.9 // indirect github.com/armon/go-metrics v0.3.9 // indirect
github.com/armon/go-radix v1.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.14 // indirect github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect
github.com/aws/smithy-go v1.13.2 // indirect github.com/aws/smithy-go v1.13.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v3 v3.0.0 // indirect github.com/cenkalti/backoff/v3 v3.0.0 // indirect
@ -100,7 +100,7 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/vault v1.4.2 // indirect github.com/hashicorp/vault v1.4.2 // indirect
github.com/hashicorp/vault/sdk v0.5.1 // indirect github.com/hashicorp/vault/sdk v0.6.0 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
github.com/imdario/mergo v0.3.12 // indirect github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect
@ -109,8 +109,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.2.1 // indirect github.com/kr/pretty v0.2.1 // indirect
github.com/mailru/easyjson v0.7.6 // indirect github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect
@ -146,12 +146,12 @@ require (
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.9.0 // indirect go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect go.uber.org/zap v1.23.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.3.8 // indirect golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect

go.sum (86 changes)
View File

@ -160,20 +160,20 @@ github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.67/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.67/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.96 h1:S9paaqnJ0AJ95t5AB+iK8RM6YNZN0W0Lek1gOVJsEr8= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
github.com/aws/aws-sdk-go v1.44.96/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.16.14 h1:db6GvO4Z2UqHt5gvT0lr6J5x5P+oQ7bdRzczVaRekMU= github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk=
github.com/aws/aws-sdk-go-v2 v1.16.14/go.mod h1:s/G+UV29dECbF5rf+RNj1xhlmvoNurGSr+McVSRj59w= github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21 h1:gRIXnmAVNyoRQywdNtpAkgY+f30QNzgF53Q5OobNZZs= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 h1:nBO/RFxeq/IS5G9Of+ZrgucRciie2qpLy++3UGZ+q2E=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21/go.mod h1:XsmHMV9c512xgsW01q7H0ut+UQQQpWX8QsFbdLHDwaU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15 h1:noAhOo2mMDyYhTx99aYPvQw16T3fQ/DiKAv9fzpIKH8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 h1:oRHDrwCTVT8ZXi4sr9Ld+EXk7N/KGssOr2ygNeojEhw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15/go.mod h1:kjJ4CyD9M3Wq88GYg3IPfj67Rs0Uvz8aXK7MJ8BvE4I= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15 h1:xlf0J6DUgAj/ocvKQxCmad8Bu1lJuRbt5Wu+4G1xw1g= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 h1:GE25AWCdNUPh9AOJzI9KIJnja7IwUc1WyUqz/JTyJ/I=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15/go.mod h1:ZVJ7ejRl4+tkWMuCwjXoy0jd8fF5u3RCyWjSVjUIvQE= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.17 h1:LVM2jzEQ8mhb2dhrFl4PJ3sa5+KcKT01dsMk2Ma9/FU= github.com/aws/aws-sdk-go-v2/service/sts v1.17.1 h1:KRAix/KHvjGODaHAMXnxRk9t0D+4IJVUuS/uwXxngXk=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.17/go.mod h1:bQujK1n0V1D1Gz5uII1jaB1WDvhj4/T3tElsJnVXCR0= github.com/aws/aws-sdk-go-v2/service/sts v1.17.1/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
github.com/aws/smithy-go v1.13.2 h1:TBLKyeJfXTrTXRHmsv4qWt9IQGYyWThLYaJWSahTOGE= github.com/aws/smithy-go v1.13.4 h1:/RN2z1txIJWeXeOkzX+Hk/4Uuvv7dWtCjbmVJcrskyk=
github.com/aws/smithy-go v1.13.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/baum/kmip-go v0.0.0-20220714190649-7b37ecf92eb2/go.mod h1:5WlKRqL5dfI68V56W+4ZmlPSL+TSfqQrKJYI8CSJz+E= github.com/baum/kmip-go v0.0.0-20220714190649-7b37ecf92eb2/go.mod h1:5WlKRqL5dfI68V56W+4ZmlPSL+TSfqQrKJYI8CSJz+E=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@ -371,8 +371,8 @@ github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE
github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8= github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8=
github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk= github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk=
github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 h1:AGbIx+qTKLkYrrxL6QuwjAR5MvbuX06uMHJFb8mG+ro= github.com/gemalto/kmip-go v0.0.8 h1:RvKWTd2ACxOs7OF1f6SvPYebjmQbN0myfDHVQmX/k8g=
github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26/go.mod h1:7bAnjuzri8yGoJMwngnAd0HdXMRDQU+l1Zaiz12Tr68= github.com/gemalto/kmip-go v0.0.8/go.mod h1:7bAnjuzri8yGoJMwngnAd0HdXMRDQU+l1Zaiz12Tr68=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -607,6 +607,8 @@ github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTr
github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs= github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8=
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg= github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg=
github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@ -708,8 +710,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
github.com/hashicorp/vault/api v1.7.2 h1:kawHE7s/4xwrdKbkmwQi0wYaIeUhk5ueek7ljuezCVQ= github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI=
github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M= github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E=
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@ -719,8 +721,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.5.1 h1:zly/TmNgOXCGgWIRA8GojyXzG817POtVh3uzIwzZx+8= github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs=
github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU= github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
@ -833,8 +835,9 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@ -842,8 +845,9 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@ -935,8 +939,10 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -945,8 +951,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/onsi/gomega v1.22.1 h1:pY8O4lBfsHKZHM/6nrxkhVPUznOlIu3quZcKP/M20KI=
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@ -1119,8 +1127,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@ -1128,8 +1137,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@ -1214,8 +1224,9 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0H
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -1232,8 +1243,9 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1257,6 +1269,8 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1306,6 +1320,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1367,8 +1382,9 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1505,14 +1521,17 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1523,8 +1542,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1608,6 +1627,7 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -409,6 +409,11 @@ func (cs *ControllerServer) CreateVolume(
// Set Metadata on PV Create // Set Metadata on PV Create
err = volClient.SetAllMetadata(metadata) err = volClient.SetAllMetadata(metadata)
if err != nil { if err != nil {
purgeErr := volClient.PurgeVolume(ctx, true)
if purgeErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
}
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
} }

View File

@ -633,6 +633,17 @@ func (ns *NodeServer) NodeGetVolumeStats(
stat, err := os.Stat(targetPath) stat, err := os.Stat(targetPath)
if err != nil { if err != nil {
if util.IsCorruptedMountError(err) {
log.WarningLog(ctx, "corrupted mount detected in %q: %v", targetPath, err)
return &csi.NodeGetVolumeStatsResponse{
VolumeCondition: &csi.VolumeCondition{
Abnormal: true,
Message: err.Error(),
},
}, nil
}
return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err) return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err)
} }
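This hunk (and the matching RBD hunk later in the diff) makes NodeGetVolumeStats report a corrupted mount as an abnormal volume condition instead of failing the call outright. A minimal sketch of how a CSI client might consume the field, assuming the standard github.com/container-storage-interface/spec/lib/go/csi types and a pre-built nodeClient; the remediation comment is illustrative only:

resp, err := nodeClient.NodeGetVolumeStats(ctx, &csi.NodeGetVolumeStatsRequest{
	VolumeId:   volumeID,
	VolumePath: targetPath,
})
if err == nil && resp.GetVolumeCondition().GetAbnormal() {
	// mount is unhealthy; a volume-health monitor could trigger remediation here
	fmt.Println("abnormal volume:", resp.GetVolumeCondition().GetMessage())
}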

View File

@ -864,3 +864,9 @@ func (conn *Connection) ReserveNewUUIDMapping(ctx context.Context,
return setOMapKeys(ctx, conn, journalPool, cj.namespace, cj.csiDirectory, setKeys) return setOMapKeys(ctx, conn, journalPool, cj.namespace, cj.csiDirectory, setKeys)
} }
// ResetVolumeOwner updates the owner in the rados object.
func (conn *Connection) ResetVolumeOwner(ctx context.Context, pool, reservedUUID, owner string) error {
return setOMapKeys(ctx, conn, pool, conn.config.namespace, conn.config.cephUUIDDirectoryPrefix+reservedUUID,
map[string]string{conn.config.ownerKey: owner})
}

View File

@ -1268,6 +1268,17 @@ func (ns *NodeServer) NodeGetVolumeStats(
stat, err := os.Stat(targetPath) stat, err := os.Stat(targetPath)
if err != nil { if err != nil {
if util.IsCorruptedMountError(err) {
log.WarningLog(ctx, "corrupted mount detected in %q: %v", targetPath, err)
return &csi.NodeGetVolumeStatsResponse{
VolumeCondition: &csi.VolumeCondition{
Abnormal: true,
Message: err.Error(),
},
}, nil
}
return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err) return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err)
} }

View File

@ -538,7 +538,7 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
// Generate new volume Handler // Generate new volume Handler
// The volume handler won't remain same as its contains poolID,clusterID etc // The volume handler won't remain same as its contains poolID,clusterID etc
// which are not same across clusters. // which are not same across clusters.
// nolint:gocyclo,cyclop // TODO: reduce complexity // nolint:gocyclo,cyclop,nestif // TODO: reduce complexity
func RegenerateJournal( func RegenerateJournal(
volumeAttributes map[string]string, volumeAttributes map[string]string,
claimName, claimName,
@ -625,8 +625,14 @@ func RegenerateJournal(
return "", err return "", err
} }
} }
if rbdVol.Owner != owner {
err = j.ResetVolumeOwner(ctx, rbdVol.JournalPool, rbdVol.ReservedID, owner)
if err != nil {
return "", err
}
}
// Update Metadata on reattach of the same old PV // Update Metadata on reattach of the same old PV
parameters := k8s.PrepareVolumeMetadata(claimName, rbdVol.Owner, "") parameters := k8s.PrepareVolumeMetadata(claimName, owner, "")
err = rbdVol.setAllMetadata(parameters) err = rbdVol.setAllMetadata(parameters)
if err != nil { if err != nil {
return "", fmt.Errorf("failed to set volume metadata: %w", err) return "", fmt.Errorf("failed to set volume metadata: %w", err)

View File

@ -761,13 +761,13 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
if err != nil { if err != nil {
log.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return nil, fmt.Errorf("failed to get remote status: %w", err) return nil, status.Errorf(codes.Internal, "failed to get remote status: %v", err)
} }
description := remoteStatus.Description description := remoteStatus.Description
lastSyncTime, err := getLastSyncTime(description) lastSyncTime, err := getLastSyncTime(description)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get last sync time: %w", err) return nil, status.Errorf(codes.Internal, "failed to get last sync time: %v", err)
} }
resp := &replication.GetVolumeReplicationInfoResponse{ resp := &replication.GetVolumeReplicationInfoResponse{
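The two changed returns above wrap failures in gRPC status errors instead of plain wrapped errors, so callers can branch on the code. A hedged sketch of the caller side, using the standard google.golang.org/grpc/status and codes packages:

if err != nil {
	switch status.Code(err) {
	case codes.Internal:
		// remote mirror status or last-sync-time lookup failed inside the driver
	case codes.InvalidArgument:
		// the request itself was malformed
	default:
		// other gRPC errors
	}
}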

View File

@ -261,7 +261,7 @@ up)
KUBE_MAJOR=$(kube_version 1) KUBE_MAJOR=$(kube_version 1)
KUBE_MINOR=$(kube_version 2) KUBE_MINOR=$(kube_version 2)
if [ "${KUBE_MAJOR}" -eq 1 ] && [ "${KUBE_MINOR}" -ge 22 ];then if [ "${KUBE_MAJOR}" -eq 1 ] && [ "${KUBE_MINOR}" -ge 22 ];then
K8S_FEATURE_GATES="${K8S_FEATURE_GATES},ReadWriteOncePod=true,PodSecurity=false" K8S_FEATURE_GATES="${K8S_FEATURE_GATES},ReadWriteOncePod=true"
fi fi
if [ "${KUBE_MAJOR}" -eq 1 ] && [ "${KUBE_MINOR}" -ge 23 ];then if [ "${KUBE_MAJOR}" -eq 1 ] && [ "${KUBE_MINOR}" -ge 23 ];then
K8S_FEATURE_GATES="${K8S_FEATURE_GATES},RecoverVolumeExpansionFailure=true" K8S_FEATURE_GATES="${K8S_FEATURE_GATES},RecoverVolumeExpansionFailure=true"

View File

@ -26,9 +26,9 @@ type Config struct {
// information on AWS regions. // information on AWS regions.
Region string Region string
// The credentials object to use when signing requests. Defaults to a // The credentials object to use when signing requests.
// chain of credential providers to search for credentials in environment // Use the LoadDefaultConfig to load configuration from all the SDK's supported
// variables, shared credential file, and EC2 Instance Roles. // sources, and resolve credentials using the SDK's default credential chain.
Credentials CredentialsProvider Credentials CredentialsProvider
// The Bearer Authentication token provider to use for authenticating API // The Bearer Authentication token provider to use for authenticating API
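The reworded comment above now points users to LoadDefaultConfig rather than describing a built-in default chain. A minimal sketch of that pattern, assuming the github.com/aws/aws-sdk-go-v2/config package; the region is a placeholder:

cfg, err := config.LoadDefaultConfig(context.TODO(),
	config.WithRegion("us-east-1"), // placeholder region
)
if err != nil {
	// configuration or credential resolution failed
}
_ = cfg.Credentials // resolved through the SDK's default credential chain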

View File

@ -178,6 +178,12 @@ func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil)) p.creds.Store((*Credentials)(nil))
} }
// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache
// matches the target provider type.
func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
return IsCredentialsProvider(p.provider, target)
}
// HandleFailRefreshCredentialsCacheStrategy is an interface for // HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache to allow CredentialsProvider how failed to refresh // CredentialsCache to allow CredentialsProvider how failed to refresh
// credentials is handled. // credentials is handled.

View File

@ -3,6 +3,7 @@ package aws
import ( import (
"context" "context"
"fmt" "fmt"
"reflect"
"time" "time"
"github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/aws-sdk-go-v2/internal/sdk"
@ -129,3 +130,41 @@ type CredentialsProviderFunc func(context.Context) (Credentials, error)
func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
return fn(ctx) return fn(ctx)
} }
type isCredentialsProvider interface {
IsCredentialsProvider(CredentialsProvider) bool
}
// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the
// implementation type.
//
// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating
// whether target matches the credential provider type.
//
// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used:
//
// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
// If provider is of type *T and target is of type *V,true if type *T is the same as type *V, otherwise false
func IsCredentialsProvider(provider, target CredentialsProvider) bool {
if target == nil || provider == nil {
return provider == target
}
if x, ok := provider.(isCredentialsProvider); ok {
return x.IsCredentialsProvider(target)
}
targetType := reflect.TypeOf(target)
if targetType.Kind() != reflect.Ptr {
targetType = reflect.PtrTo(targetType)
}
providerType := reflect.TypeOf(provider)
if providerType.Kind() != reflect.Ptr {
providerType = reflect.PtrTo(providerType)
}
return targetType.AssignableTo(providerType)
}
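A short usage sketch for the helper added above, mirroring how haveCredentialProvider uses it later in this diff. Wrapping in NewCredentialsCache shows the delegation through the isCredentialsProvider interface; the provider variable is hypothetical:

cached := aws.NewCredentialsCache(provider)

// true when the wrapped provider is aws.AnonymousCredentials, in either
// pointer or value form
if aws.IsCredentialsProvider(cached, (*aws.AnonymousCredentials)(nil)) {
	// skip request signing
}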

View File

@ -3,4 +3,4 @@
package aws package aws
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.14" const goModuleVersion = "1.17.1"

View File

@ -93,7 +93,7 @@ func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
} }
// MaxAttempts returns the maximum number of attempts that can be made for // MaxAttempts returns the maximum number of attempts that can be made for
// a attempt before failing. A value of 0 implies that the attempt should // an attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable. // be retried until it succeeds if the errors are retryable.
func (a *AdaptiveMode) MaxAttempts() int { func (a *AdaptiveMode) MaxAttempts() int {
return a.retryer.MaxAttempts() return a.retryer.MaxAttempts()
@ -127,7 +127,7 @@ func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
// GetAttemptToken returns the attempt token that can be used to rate limit // GetAttemptToken returns the attempt token that can be used to rate limit
// attempt calls. Will be used by the SDK's retry package's Attempt // attempt calls. Will be used by the SDK's retry package's Attempt
// middleware to get a attempt token prior to calling the temp and releasing // middleware to get an attempt token prior to calling the temp and releasing
// the attempt token after the attempt has been made. // the attempt token after the attempt has been made.
func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) { func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
for { for {

View File

@ -49,7 +49,7 @@ type Retryer interface {
IsErrorRetryable(error) bool IsErrorRetryable(error) bool
// MaxAttempts returns the maximum number of attempts that can be made for // MaxAttempts returns the maximum number of attempts that can be made for
// a attempt before failing. A value of 0 implies that the attempt should // an attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable. // be retried until it succeeds if the errors are retryable.
MaxAttempts() int MaxAttempts() int
@ -66,7 +66,7 @@ type Retryer interface {
GetInitialToken() (releaseToken func(error) error) GetInitialToken() (releaseToken func(error) error)
} }
// RetryerV2 is an interface to determine if a given error from a attempt // RetryerV2 is an interface to determine if a given error from an attempt
// should be retried, and if so what backoff delay to apply. The default // should be retried, and if so what backoff delay to apply. The default
// implementation used by most services is the retry package's Standard type. // implementation used by most services is the retry package's Standard type.
// Which contains basic retry logic using exponential backoff. // Which contains basic retry logic using exponential backoff.

View File

@ -371,13 +371,8 @@ func haveCredentialProvider(p aws.CredentialsProvider) bool {
if p == nil { if p == nil {
return false return false
} }
switch p.(type) {
case aws.AnonymousCredentials,
*aws.AnonymousCredentials:
return false
}
return true return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil))
} }
type payloadHashKey struct{} type payloadHashKey struct{}

View File

@ -1,3 +1,19 @@
# v1.1.25 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.24 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.23 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.22 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.21 (2022-09-02) # v1.1.21 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions * **Dependency Update**: Updated to the latest SDK module versions

View File

@ -3,4 +3,4 @@
package configsources package configsources
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.21" const goModuleVersion = "1.1.25"

View File

@ -1,3 +1,19 @@
# v2.4.19 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.18 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.16 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.15 (2022-09-02) # v2.4.15 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions * **Dependency Update**: Updated to the latest SDK module versions

View File

@ -3,4 +3,4 @@
package endpoints package endpoints
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "2.4.15" const goModuleVersion = "2.4.19"

View File

@ -1,3 +1,19 @@
# v1.9.19 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.18 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.16 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.15 (2022-09-02) # v1.9.15 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions * **Dependency Update**: Updated to the latest SDK module versions

View File

@ -3,4 +3,4 @@
package presignedurl package presignedurl
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.15" const goModuleVersion = "1.9.19"

View File

@ -1,3 +1,20 @@
# v1.17.1 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.0 (2022-10-21)
* **Feature**: Add presign functionality for sts:AssumeRole operation
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.19 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.18 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.17 (2022-09-02) # v1.16.17 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions * **Dependency Update**: Updated to the latest SDK module versions

View File

@ -415,3 +415,27 @@ func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.Reg
OperationName: "AssumeRole", OperationName: "AssumeRole",
} }
} }
// PresignAssumeRole is used to generate a presigned HTTP Request which contains
// presigned URL, signed headers and HTTP method used.
func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
if params == nil {
params = &AssumeRoleInput{}
}
options := c.options.copy()
for _, fn := range optFns {
fn(&options)
}
clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns,
c.client.addOperationAssumeRoleMiddlewares,
presignConverter(options).convertToPresignMiddleware,
)
if err != nil {
return nil, err
}
out := result.(*v4.PresignedHTTPRequest)
return out, nil
}
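A hedged usage sketch for the new presign hook. It assumes the package's existing NewPresignClient constructor and an aws.Config obtained from config.LoadDefaultConfig; the role ARN and session name are placeholders:

presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))

signed, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
	RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
	RoleSessionName: aws.String("example-session"),                        // placeholder
})
if err != nil {
	// handle presign failure
}
// signed.Method, signed.URL and signed.SignedHeader describe the ready-to-send request
fmt.Println(signed.Method, signed.URL)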

View File

@ -3,4 +3,4 @@
package sts package sts
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.17" const goModuleVersion = "1.17.1"

File diff suppressed because it is too large.

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.44.96" const SDKVersion = "1.44.122"

View File

@ -1,9 +1,8 @@
package shareddefaults package shareddefaults
import ( import (
"os" "os/user"
"path/filepath" "path/filepath"
"runtime"
) )
// SharedCredentialsFilename returns the SDK's default file path // SharedCredentialsFilename returns the SDK's default file path
@ -31,10 +30,17 @@ func SharedConfigFilename() string {
// UserHomeDir returns the home directory for the user the process is // UserHomeDir returns the home directory for the user the process is
// running under. // running under.
func UserHomeDir() string { func UserHomeDir() string {
-	if runtime.GOOS == "windows" { // Windows
-		return os.Getenv("USERPROFILE")
-	}
-	// *nix
-	return os.Getenv("HOME")
+	var home string
+	home = userHomeDir()
+	if len(home) > 0 {
+		return home
+	}
+	currUser, _ := user.Current()
+	if currUser != nil {
+		home = currUser.HomeDir
+	}
+	return home
}

View File

@ -0,0 +1,18 @@
//go:build !go1.12
// +build !go1.12
package shareddefaults
import (
"os"
"runtime"
)
func userHomeDir() string {
if runtime.GOOS == "windows" { // Windows
return os.Getenv("USERPROFILE")
}
// *nix
return os.Getenv("HOME")
}

View File

@ -0,0 +1,13 @@
//go:build go1.12
// +build go1.12
package shareddefaults
import (
"os"
)
func userHomeDir() string {
home, _ := os.UserHomeDir()
return home
}

File diff suppressed because it is too large.

View File

@ -1,3 +1,13 @@
# Release (2022-10-24)
## Module Highlights
* `github.com/aws/smithy-go`: v1.13.4
* **Bug Fix**: fixed document type checking for encoding nested types
# Release (2022-09-14)
* No change notes available for this release.
# Release (v1.13.2) # Release (v1.13.2)
* No change notes available for this release. * No change notes available for this release.

View File

@ -14,6 +14,9 @@ REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION}
UNIT_TEST_TAGS=
BUILD_TAGS=
ifneq ($(PRE_RELEASE_VERSION),) ifneq ($(PRE_RELEASE_VERSION),)
REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
endif endif
@ -27,6 +30,37 @@ smithy-build:
smithy-clean: smithy-clean:
cd codegen && ./gradlew clean cd codegen && ./gradlew clean
##################
# Linting/Verify #
##################
.PHONY: verify vet
verify: vet
vet:
go vet ${BUILD_TAGS} --all ./...
################
# Unit Testing #
################
.PHONY: unit unit-race unit-test unit-race-test
unit: verify
go vet ${BUILD_TAGS} --all ./... && \
go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
go test -timeout=1m ${UNIT_TEST_TAGS} ./...
unit-race: verify
go vet ${BUILD_TAGS} --all ./... && \
go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
unit-test: verify
go test -timeout=1m ${UNIT_TEST_TAGS} ./...
unit-race-test: verify
go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
##################### #####################
# Release Process # # Release Process #
##################### #####################

View File

@ -3,4 +3,4 @@
package smithy package smithy
// goModuleVersion is the tagged release for this module // goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.2" const goModuleVersion = "1.13.4"

vendor/github.com/aws/smithy-go/modman.toml generated vendored Normal file
View File

@ -0,0 +1,11 @@
[dependencies]
"github.com/google/go-cmp" = "v0.5.8"
"github.com/jmespath/go-jmespath" = "v0.4.0"
[modules]
[modules.codegen]
no_tag = true
[modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"]
no_tag = true

View File

@ -58,7 +58,6 @@ linters-settings:
gocritic: gocritic:
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
disabled-checks: disabled-checks:
- commentFormatting
revive: revive:
ignore-generated-header: true ignore-generated-header: true
wsl: wsl:
@ -120,7 +119,6 @@ linters:
- goprintffuncname - goprintffuncname
- gosec - gosec
- grouper - grouper
- ifshort
- importas - importas
# - ireturn # there are valid use cases for this pattern. too strict. # - ireturn # there are valid use cases for this pattern. too strict.
## - lll # checks line length. not enforced ## - lll # checks line length. not enforced

View File

@ -101,17 +101,17 @@ type AttestationCredentialValue struct {
// A Key Block object is a structure (see Table 7) used to encapsulate all of the information that is // A Key Block object is a structure (see Table 7) used to encapsulate all of the information that is
// closely associated with a cryptographic key. It contains a Key Value of one of the following Key Format Types: // closely associated with a cryptographic key. It contains a Key Value of one of the following Key Format Types:
// //
// · Raw This is a key that contains only cryptographic key material, encoded as a string of bytes. // - Raw This is a key that contains only cryptographic key material, encoded as a string of bytes.
// · Opaque This is an encoded key for which the encoding is unknown to the key management system. // - Opaque This is an encoded key for which the encoding is unknown to the key management system.
// It is encoded as a string of bytes. // It is encoded as a string of bytes.
// · PKCS1 This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#1 object. // - PKCS1 This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#1 object.
// · PKCS8 This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#8 object, supporting both // - PKCS8 This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#8 object, supporting both
// the RSAPrivateKey syntax and EncryptedPrivateKey. // the RSAPrivateKey syntax and EncryptedPrivateKey.
// · X.509 This is an encoded object, expressed as a DER-encoded ASN.1 X.509 object. // - X.509 This is an encoded object, expressed as a DER-encoded ASN.1 X.509 object.
// · ECPrivateKey This is an ASN.1 encoded elliptic curve private key. // - ECPrivateKey This is an ASN.1 encoded elliptic curve private key.
// · Several Transparent Key types These are algorithm-specific structures containing defined values // - Several Transparent Key types These are algorithm-specific structures containing defined values
// for the various key types, as defined in Section 2.1.7. // for the various key types, as defined in Section 2.1.7.
// · Extensions These are vendor-specific extensions to allow for proprietary or legacy key formats. // - Extensions These are vendor-specific extensions to allow for proprietary or legacy key formats.
// //
// The Key Block MAY contain the Key Compression Type, which indicates the format of the elliptic curve public // The Key Block MAY contain the Key Compression Type, which indicates the format of the elliptic curve public
// key. By default, the public key is uncompressed. // key. By default, the public key is uncompressed.
@ -119,13 +119,12 @@ type AttestationCredentialValue struct {
// The Key Block also has the Cryptographic Algorithm and the Cryptographic Length of the key contained // The Key Block also has the Cryptographic Algorithm and the Cryptographic Length of the key contained
// in the Key Value field. Some example values are: // in the Key Value field. Some example values are:
// //
// · RSA keys are typically 1024, 2048 or 3072 bits in length. // - RSA keys are typically 1024, 2048 or 3072 bits in length.
// · 3DES keys are typically from 112 to 192 bits (depending upon key length and the presence of parity bits). // - 3DES keys are typically from 112 to 192 bits (depending upon key length and the presence of parity bits).
// · AES keys are 128, 192 or 256 bits in length. // - AES keys are 128, 192 or 256 bits in length.
// //
// The Key Block SHALL contain a Key Wrapping Data structure if the key in the Key Value field is // The Key Block SHALL contain a Key Wrapping Data structure if the key in the Key Value field is
// wrapped (i.e., encrypted, or MACed/signed, or both). // wrapped (i.e., encrypted, or MACed/signed, or both).
type KeyBlock struct { type KeyBlock struct {
KeyFormatType kmip14.KeyFormatType KeyFormatType kmip14.KeyFormatType
KeyCompressionType kmip14.KeyCompressionType `ttlv:",omitempty"` KeyCompressionType kmip14.KeyCompressionType `ttlv:",omitempty"`
@ -139,12 +138,12 @@ type KeyBlock struct {
// //
// The Key Value is used only inside a Key Block and is either a Byte String or a structure (see Table 8): // The Key Value is used only inside a Key Block and is either a Byte String or a structure (see Table 8):
// //
// · The Key Value structure contains the key material, either as a byte string or as a Transparent Key // - The Key Value structure contains the key material, either as a byte string or as a Transparent Key
// structure (see Section 2.1.7), and OPTIONAL attribute information that is associated and encapsulated // structure (see Section 2.1.7), and OPTIONAL attribute information that is associated and encapsulated
// with the key material. This attribute information differs from the attributes associated with Managed // with the key material. This attribute information differs from the attributes associated with Managed
// Objects, and is obtained via the Get Attributes operation, only by the fact that it is encapsulated with // Objects, and is obtained via the Get Attributes operation, only by the fact that it is encapsulated with
// (and possibly wrapped with) the key material itself. // (and possibly wrapped with) the key material itself.
// · The Key Value Byte String is either the wrapped TTLV-encoded (see Section 9.1) Key Value structure, or // - The Key Value Byte String is either the wrapped TTLV-encoded (see Section 9.1) Key Value structure, or
// the wrapped un-encoded value of the Byte String Key Material field. // the wrapped un-encoded value of the Byte String Key Material field.
// //
// TODO: Unmarshaler impl which unmarshals correct KeyMaterial type. // TODO: Unmarshaler impl which unmarshals correct KeyMaterial type.
@ -163,14 +162,14 @@ type KeyValue struct {
// //
// This structure contains fields for: // This structure contains fields for:
// //
// · A Wrapping Method, which indicates the method used to wrap the Key Value. // - A Wrapping Method, which indicates the method used to wrap the Key Value.
// · Encryption Key Information, which contains the Unique Identifier (see 3.1) value of the encryption key // - Encryption Key Information, which contains the Unique Identifier (see 3.1) value of the encryption key
// and associated cryptographic parameters. // and associated cryptographic parameters.
// · MAC/Signature Key Information, which contains the Unique Identifier value of the MAC/signature key // - MAC/Signature Key Information, which contains the Unique Identifier value of the MAC/signature key
// and associated cryptographic parameters. // and associated cryptographic parameters.
// · A MAC/Signature, which contains a MAC or signature of the Key Value. // - A MAC/Signature, which contains a MAC or signature of the Key Value.
// · An IV/Counter/Nonce, if REQUIRED by the wrapping method. // - An IV/Counter/Nonce, if REQUIRED by the wrapping method.
// · An Encoding Option, specifying the encoding of the Key Material within the Key Value structure of the // - An Encoding Option, specifying the encoding of the Key Material within the Key Value structure of the
// Key Block that has been wrapped. If No Encoding is specified, then the Key Value structure SHALL NOT contain // Key Block that has been wrapped. If No Encoding is specified, then the Key Value structure SHALL NOT contain
// any attributes. // any attributes.
// //
@ -184,17 +183,17 @@ type KeyValue struct {
// //
// The following wrapping methods are currently defined: // The following wrapping methods are currently defined:
// //
// · Encrypt only (i.e., encryption using a symmetric key or public key, or authenticated encryption algorithms that use a single key). // - Encrypt only (i.e., encryption using a symmetric key or public key, or authenticated encryption algorithms that use a single key).
// · MAC/sign only (i.e., either MACing the Key Value with a symmetric key, or signing the Key Value with a private key). // - MAC/sign only (i.e., either MACing the Key Value with a symmetric key, or signing the Key Value with a private key).
// · Encrypt then MAC/sign. // - Encrypt then MAC/sign.
// · MAC/sign then encrypt. // - MAC/sign then encrypt.
// · TR-31. // - TR-31.
// · Extensions. // - Extensions.
// //
// The following encoding options are currently defined: // The following encoding options are currently defined:
// //
// · No Encoding (i.e., the wrapped un-encoded value of the Byte String Key Material field in the Key Value structure). // - No Encoding (i.e., the wrapped un-encoded value of the Byte String Key Material field in the Key Value structure).
// · TTLV Encoding (i.e., the wrapped TTLV-encoded Key Value structure). // - TTLV Encoding (i.e., the wrapped TTLV-encoded Key Value structure).
type KeyWrappingData struct { type KeyWrappingData struct {
WrappingMethod kmip14.WrappingMethod WrappingMethod kmip14.WrappingMethod
EncryptionKeyInformation *EncryptionKeyInformation EncryptionKeyInformation *EncryptionKeyInformation
@ -254,9 +253,9 @@ type TransparentDSAPublicKey struct {
// //
// One of the following SHALL be present (refer to [PKCS#1]): // One of the following SHALL be present (refer to [PKCS#1]):
// //
// · Private Exponent, // - Private Exponent,
// · P and Q (the first two prime factors of Modulus), or // - P and Q (the first two prime factors of Modulus), or
// · Prime Exponent P and Prime Exponent Q. // - Prime Exponent P and Prime Exponent Q.
type TransparentRSAPrivateKey struct { type TransparentRSAPrivateKey struct {
Modulus *big.Int `validate:"required"` Modulus *big.Int `validate:"required"`
PrivateExponent, PublicExponent *big.Int PrivateExponent, PublicExponent *big.Int
@ -395,10 +394,10 @@ type TransparentECPublicKey struct {
// //
// The Template-Attribute, Common Template-Attribute, Private Key Template-Attribute, and Public Key // The Template-Attribute, Common Template-Attribute, Private Key Template-Attribute, and Public Key
// Template-Attribute structures are defined identically as follows: // Template-Attribute structures are defined identically as follows:
//
// type TemplateAttribute struct { // type TemplateAttribute struct {
// Attribute []Attribute // Attribute []Attribute
// } // }
type TemplateAttribute struct { type TemplateAttribute struct {
Name []Name Name []Name
Attribute []Attribute Attribute []Attribute
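For orientation on the structures documented above, a small hedged sketch that fills in TransparentRSAPrivateKey using only the fields shown in this diff. The root package name kmip and the placeholder byte slices are assumptions:

// PKCS#1 alternative: Modulus plus Private Exponent (values are placeholders).
priv := kmip.TransparentRSAPrivateKey{
	Modulus:         new(big.Int).SetBytes(modulusBytes), // required
	PrivateExponent: new(big.Int).SetBytes(dBytes),
}
_ = priv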

View File

@ -12,12 +12,12 @@ import (
"github.com/gemalto/kmip-go/ttlv" "github.com/gemalto/kmip-go/ttlv"
) )
// nolint:gochecknoinits //nolint:gochecknoinits
func init() { func init() {
Register(&ttlv.DefaultRegistry) Register(&ttlv.DefaultRegistry)
} }
// Register registers the 1.4 enumeration values with the registry. // Registers the 1.4 enumeration values with the registry.
func Register(registry *ttlv.Registry) { func Register(registry *ttlv.Registry) {
RegisterGeneratedDefinitions(registry) RegisterGeneratedDefinitions(registry)
} }

View File

@ -5,7 +5,6 @@ import (
) )
// DestroyRequestPayload //////////////////////////////////////// // DestroyRequestPayload ////////////////////////////////////////
//
type DestroyRequestPayload struct { type DestroyRequestPayload struct {
UniqueIdentifier string UniqueIdentifier string
} }

View File

@ -7,7 +7,6 @@ import (
) )
// GetRequestPayload //////////////////////////////////////// // GetRequestPayload ////////////////////////////////////////
//
type GetRequestPayload struct { type GetRequestPayload struct {
UniqueIdentifier string UniqueIdentifier string
} }

View File

@ -50,7 +50,7 @@ var ErrUnexpectedValue = errors.New("no field was found to unmarshal value into"
// cause ErrUnsupportedTypeError is returned. If the source value's type is not recognized, // cause ErrUnsupportedTypeError is returned. If the source value's type is not recognized,
// *UnmarshalerError with cause ErrInvalidType is returned. // *UnmarshalerError with cause ErrInvalidType is returned.
// //
// Unmarshaling Structure // # Unmarshaling Structure
// //
// Unmarshal will try to match the values in the Structure with the fields in the // Unmarshal will try to match the values in the Structure with the fields in the
// destination struct. Structure is an array of values, while a struct is more like // destination struct. Structure is an array of values, while a struct is more like
@ -64,11 +64,13 @@ var ErrUnexpectedValue = errors.New("no field was found to unmarshal value into"
// 1. If the type of a field is a struct, and the struct contains a field named "TTLVTag", and the field // 1. If the type of a field is a struct, and the struct contains a field named "TTLVTag", and the field
// has a "ttlv" struct tag, the value of the struct tag will be parsed using ParseTag(). If // has a "ttlv" struct tag, the value of the struct tag will be parsed using ParseTag(). If
// parsing fails, an error is returned. The type and value of the TTLVTag field is ignored. // parsing fails, an error is returned. The type and value of the TTLVTag field is ignored.
// In this example, the F field will map to TagDeactivationDate: //
// In this example, the F field will map to TagDeactivationDate.
// //
// type Bar struct { // type Bar struct {
// F Foo // F Foo
// } // }
//
// type Foo struct { // type Foo struct {
// TTLVTag struct{} `ttlv:"DeactivationDate"` // TTLVTag struct{} `ttlv:"DeactivationDate"`
// } // }
@ -81,6 +83,7 @@ var ErrUnexpectedValue = errors.New("no field was found to unmarshal value into"
// // conflicts with Foo's intrinsic tag // // conflicts with Foo's intrinsic tag
// F2 Foo `ttlv:"0x420034"` // the value can also be hex // F2 Foo `ttlv:"0x420034"` // the value can also be hex
// } // }
//
// 2. If the type of the field is a struct, and the struct contains a field named "TTLVTag", // 2. If the type of the field is a struct, and the struct contains a field named "TTLVTag",
// and that field is of type ttlv.Tag and is not empty, the value of the field will be the // and that field is of type ttlv.Tag and is not empty, the value of the field will be the
// inferred Tag. For example: // inferred Tag. For example:
@ -91,8 +94,9 @@ var ErrUnexpectedValue = errors.New("no field was found to unmarshal value into"
// f := Foo{TTLVTag: ttlv.TagState} // f := Foo{TTLVTag: ttlv.TagState}
// //
// This allows you to dynamically set the KMIP tag that a value will marshal to. // This allows you to dynamically set the KMIP tag that a value will marshal to.
//
// 3. The "ttlv" struct tag can be used to indicate the tag for a field. The value will // 3. The "ttlv" struct tag can be used to indicate the tag for a field. The value will
// be parsed with ParseTag() // be parsed with ParseTag():
// //
// type Bar struct { // type Bar struct {
// F Foo `ttlv:"DerivationData"` // F Foo `ttlv:"DerivationData"`
@ -318,7 +322,7 @@ func (dec *Decoder) unmarshal(val reflect.Value, ttlv TTLV) error {
} }
val.SetBool(ttlv.ValueBoolean()) val.SetBool(ttlv.ValueBoolean())
// nolint:dupl //nolint:dupl
case TypeEnumeration: case TypeEnumeration:
switch val.Kind() { switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
@ -338,7 +342,7 @@ func (dec *Decoder) unmarshal(val reflect.Value, ttlv TTLV) error {
default: default:
return typeMismatchErr() return typeMismatchErr()
} }
// nolint:dupl //nolint:dupl
case TypeInteger: case TypeInteger:
switch val.Kind() { switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
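Pulling the tag-inference rules from the doc comment above into one place, a hedged illustration of a destination struct that exercises rules 1 and 3; the type and field names are made up for the example:

// rule 1: the tag comes from the TTLVTag field's struct tag on the field's type
type DeactivationDate struct {
	TTLVTag struct{} `ttlv:"DeactivationDate"`
}

type Payload struct {
	Deactivation DeactivationDate               // maps to TagDeactivationDate via rule 1
	Derivation   []byte `ttlv:"DerivationData"` // rule 3: tag named directly on the field
}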

View File

@ -40,7 +40,9 @@ var (
// and from the inferred KMIP tag, according to these rules: // and from the inferred KMIP tag, according to these rules:
// //
// 1. If the value is a TTLV, it is copied byte for byte // 1. If the value is a TTLV, it is copied byte for byte
//
// 2. If the value implements Marshaler, call that // 2. If the value implements Marshaler, call that
//
// 3. If the struct field has an "omitempty" flag, and the value is // 3. If the struct field has an "omitempty" flag, and the value is
// zero, skip the field: // zero, skip the field:
// //
@ -50,8 +52,10 @@ var (
// //
// 4. If the value is a slice (except []byte) or array, marshal all // 4. If the value is a slice (except []byte) or array, marshal all
// values concatenated // values concatenated
//
// 5. If a tag has not been inferred at this point, return *MarshalerError with // 5. If a tag has not been inferred at this point, return *MarshalerError with
// cause ErrNoTag // cause ErrNoTag
//
// 6. If the Tag is registered as an enum, or has the "enum" struct tag flag, attempt // 6. If the Tag is registered as an enum, or has the "enum" struct tag flag, attempt
// to marshal as an Enumeration. int, int8, int16, int32, and their uint counterparts // to marshal as an Enumeration. int, int8, int16, int32, and their uint counterparts
// can be marshaled as an Enumeration. A string can be marshaled to an Enumeration // can be marshaled as an Enumeration. A string can be marshaled to an Enumeration
@ -70,9 +74,11 @@ var (
// If the string can't be interpreted as an enum value, it will be encoded as a TextString. If // If the string can't be interpreted as an enum value, it will be encoded as a TextString. If
// the "enum" struct flag is set, the value *must* successfully encode to an Enumeration using // the "enum" struct flag is set, the value *must* successfully encode to an Enumeration using
// above rules, or an error is returned. // above rules, or an error is returned.
//
// 7. If the Tag is registered as a bitmask, or has the "bitmask" struct tag flag, attempt // 7. If the Tag is registered as a bitmask, or has the "bitmask" struct tag flag, attempt
// to marshal to an Integer, following the same rules as for Enumerations. The ParseInt() // to marshal to an Integer, following the same rules as for Enumerations. The ParseInt()
// function is used to parse string values. // function is used to parse string values.
//
// 9. time.Time marshals to DateTime. If the field has the "datetimeextended" struct flag, // 9. time.Time marshals to DateTime. If the field has the "datetimeextended" struct flag,
// marshal as DateTimeExtended. Example: // marshal as DateTimeExtended. Example:
// //
@ -81,13 +87,20 @@ var (
// } // }
// //
// 10. big.Int marshals to BigInteger // 10. big.Int marshals to BigInteger
//
// 11. time.Duration marshals to Interval // 11. time.Duration marshals to Interval
//
// 12. string marshals to TextString // 12. string marshals to TextString
//
// 13. []byte marshals to ByteString // 13. []byte marshals to ByteString
//
// 14. all int and uint variants except int64 and uint64 marshal to Integer. If the golang // 14. all int and uint variants except int64 and uint64 marshal to Integer. If the golang
// value overflows the KMIP value, *MarshalerError with cause ErrIntOverflow is returned // value overflows the KMIP value, *MarshalerError with cause ErrIntOverflow is returned
//
// 15. int64 and uint64 marshal to LongInteger // 15. int64 and uint64 marshal to LongInteger
//
// 16. bool marshals to Boolean // 16. bool marshals to Boolean
//
// 17. structs marshal to Structure. Each field of the struct will be marshaled into the // 17. structs marshal to Structure. Each field of the struct will be marshaled into the
// values of the Structure according to the above rules. // values of the Structure according to the above rules.
// //
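To tie the numbered marshaling rules together, a hedged example struct using the flags the doc comment above mentions; the field names are illustrative, and the combined "Tag,flag" form is assumed to follow the usual struct-tag convention:

type Attributes struct {
	Comment string    `ttlv:",omitempty"`        // rule 3: zero value is skipped
	State   int       `ttlv:"State,enum"`        // rule 6: marshals as an Enumeration
	D       time.Time `ttlv:",datetimeextended"` // rule 9: marshals as DateTimeExtended
}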

View File

@ -289,7 +289,6 @@ func ParseType(s string, enumMap EnumMap) (Type, error) {
// be the name from the spec. Names should be in the normalized format // be the name from the spec. Names should be in the normalized format
// described in the KMIP spec (see NormalizeName()). // described in the KMIP spec (see NormalizeName()).
// //
//
// Value enumerations are used for encoding and decoding KMIP Enumeration values, // Value enumerations are used for encoding and decoding KMIP Enumeration values,
// KMIP Integer bitmask values, Types, and Tags. // KMIP Integer bitmask values, Types, and Tags.
type EnumMap interface { type EnumMap interface {

View File

@ -15,7 +15,7 @@ import (
// program. // program.
var DefaultRegistry Registry var DefaultRegistry Registry
// nolint:gochecknoinits //nolint:gochecknoinits
func init() { func init() {
RegisterTypes(&DefaultRegistry) RegisterTypes(&DefaultRegistry)
} }

View File

@ -94,7 +94,8 @@ func (t DateTimeExtended) MarshalTTLV(e *Encoder, tag Tag) error {
// bytes and native go types. It's useful in tests, or where you want to construct // bytes and native go types. It's useful in tests, or where you want to construct
// an arbitrary TTLV structure in code without declaring a bespoke type, e.g.: // an arbitrary TTLV structure in code without declaring a bespoke type, e.g.:
// //
// v := ttlv.Value{Tag: TagBatchCount, Value: Values{ // v := ttlv.Value{
// Tag: TagBatchCount, Value: Values{
// Value{Tag: TagComment, Value: "red"}, // Value{Tag: TagComment, Value: "red"},
// Value{Tag: TagComment, Value: "blue"}, // Value{Tag: TagComment, Value: "blue"},
// Value{Tag: TagComment, Value: "green"}, // Value{Tag: TagComment, Value: "green"},

vendor/github.com/go-logr/logr/funcr/funcr.go generated vendored Normal file
View File

@ -0,0 +1,787 @@
/*
Copyright 2021 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package funcr implements formatting of structured log messages and
// optionally captures the call site and timestamp.
//
// The simplest way to use it is via its implementation of a
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
// Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
// Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
// standard JSON tags (all except "string").
package funcr
import (
"bytes"
"encoding"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
)
// New returns a logr.Logger which is implemented by an arbitrary function.
func New(fn func(prefix, args string), opts Options) logr.Logger {
return logr.New(newSink(fn, NewFormatter(opts)))
}
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
// and produces JSON output.
func NewJSON(fn func(obj string), opts Options) logr.Logger {
fnWrapper := func(_, obj string) {
fn(obj)
}
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
}
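// A usage sketch for the constructors above (illustrative only; the stdout sink
// and the logged key/value pairs are assumptions, not part of this package):
//
//	log := funcr.New(func(prefix, args string) {
//		fmt.Println(prefix, args)
//	}, funcr.Options{LogCaller: funcr.All, Verbosity: 1})
//	log.V(1).Info("reconciling", "volume", "pvc-123")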
// Underlier exposes access to the underlying logging function. Since
// callers only have a logr.Logger, they have to know which
// implementation is in use, so this interface is less of an
// abstraction and more of a way to test type conversion.
type Underlier interface {
GetUnderlying() func(prefix, args string)
}
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
l := &fnlogger{
Formatter: formatter,
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
l.Formatter.AddCallDepth(1)
return l
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
// LogCaller tells funcr to add a "caller" key to some or all log lines.
// This has some overhead, so some users might not want it.
LogCaller MessageClass
// LogCallerFunc tells funcr to also log the calling function name. This
// has no effect if caller logging is not enabled (see Options.LogCaller).
LogCallerFunc bool
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
// overhead, so some users might not want it.
LogTimestamp bool
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
// is enabled. If not specified, a default format will be used. For more
// details, see docs for Go's time.Layout.
TimestampFormat string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
Verbosity int
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
// while a log line is being rendered. The kvList argument follows logr
// conventions - each pair of slice elements is comprised of a string key
// and an arbitrary value (verified and sanitized before calling this
// hook). The value returned must follow the same conventions. This hook
// can be used to audit or modify logged data. For example, you might want
// to prefix all of funcr's built-in keys with some string. This hook is
// only called for built-in (provided by funcr itself) key-value pairs.
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
RenderBuiltinsHook func(kvList []interface{}) []interface{}
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
RenderValuesHook func(kvList []interface{}) []interface{}
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
RenderArgsHook func(kvList []interface{}) []interface{}
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
// slice, array, or map the depth is increased by one. When the maximum is
// reached, the value will be converted to a string indicating that the max
// depth has been exceeded. If this field is not specified, a default
// value will be used.
MaxLogDepth int
}
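// An editor-added sketch of the hook options above: prefix every built-in key
// (such as "ts" and "level", emitted by FormatInfo/FormatError) with "funcr.",
// leaving user-provided pairs untouched.
//
//	opts := Options{
//		LogTimestamp: true,
//		Verbosity:    1,
//		RenderBuiltinsHook: func(kvList []interface{}) []interface{} {
//			for i := 0; i < len(kvList); i += 2 {
//				if k, ok := kvList[i].(string); ok {
//					kvList[i] = "funcr." + k
//				}
//			}
//			return kvList
//		},
//	}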
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
Formatter
write func(prefix, args string)
}
func (l fnlogger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) GetUnderlying() func(prefix, args string) {
return l.write
}
// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
return newFormatter(opts, outputKeyValue)
}
// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
return newFormatter(opts, outputJSON)
}
// Defaults for Options.
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16
func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.TimestampFormat == "" {
opts.TimestampFormat = defaultTimestampFormat
}
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
values: nil,
depth: 0,
opts: opts,
}
return f
}
// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []interface{}
valuesStr string
depth int
opts Options
}
// outputFormat indicates which outputFormat to use.
type outputFormat int
const (
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
outputKeyValue outputFormat = iota
// outputJSON emits strict JSON.
outputJSON
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
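// For example (editor-added), logging a PseudoStruct value renders it roughly
// like a struct literal:
//
//	log.Info("request", "httpInfo", PseudoStruct{"method", "GET", "code", 200})
//	// ... "httpInfo"={"method":"GET","code":200}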
// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
}
continuing = true
buf.WriteString(f.valuesStr)
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
v := kvList[i+1]
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
buf.WriteByte(' ')
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) pretty(value interface{}) string {
return f.prettyWithFlags(value, 0, 0)
}
const (
flagRawStruct = 0x1 // do not print braces on structs
)
// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
// Handle types that take full control of logging.
if v, ok := value.(logr.Marshaler); ok {
// Replace the value with what the type wants to get logged.
// That then gets handled below via reflection.
value = invokeMarshaler(v)
}
// Handle types that want to format themselves.
switch v := value.(type) {
case fmt.Stringer:
value = invokeStringer(v)
case error:
value = invokeError(v)
}
// Handling the most common types without reflect is a small perf win.
switch v := value.(type) {
case bool:
return strconv.FormatBool(v)
case string:
return prettyString(v)
case int:
return strconv.FormatInt(int64(v), 10)
case int8:
return strconv.FormatInt(int64(v), 10)
case int16:
return strconv.FormatInt(int64(v), 10)
case int32:
return strconv.FormatInt(int64(v), 10)
case int64:
return strconv.FormatInt(int64(v), 10)
case uint:
return strconv.FormatUint(uint64(v), 10)
case uint8:
return strconv.FormatUint(uint64(v), 10)
case uint16:
return strconv.FormatUint(uint64(v), 10)
case uint32:
return strconv.FormatUint(uint64(v), 10)
case uint64:
return strconv.FormatUint(v, 10)
case uintptr:
return strconv.FormatUint(uint64(v), 10)
case float32:
return strconv.FormatFloat(float64(v), 'f', -1, 32)
case float64:
return strconv.FormatFloat(v, 'f', -1, 64)
case complex64:
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
case complex128:
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
case PseudoStruct:
buf := bytes.NewBuffer(make([]byte, 0, 1024))
v = f.sanitize(v)
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
}
buf := bytes.NewBuffer(make([]byte, 0, 256))
t := reflect.TypeOf(value)
if t == nil {
return "null"
}
v := reflect.ValueOf(value)
switch t.Kind() {
case reflect.Bool:
return strconv.FormatBool(v.Bool())
case reflect.String:
return prettyString(v.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(int64(v.Int()), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(uint64(v.Uint()), 10)
case reflect.Float32:
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
case reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
case reflect.Complex64:
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
case reflect.Complex128:
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
case reflect.Struct:
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
// reflect says this field is only defined for non-exported fields.
continue
}
if !v.Field(i).CanInterface() {
// reflect isn't clear exactly what this means, but we can't use it.
continue
}
name := ""
omitempty := false
if tag, found := fld.Tag.Lookup("json"); found {
if tag == "-" {
continue
}
if comma := strings.Index(tag, ","); comma != -1 {
if n := tag[:comma]; n != "" {
name = n
}
rest := tag[comma:]
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
omitempty = true
}
} else {
name = tag
}
}
if omitempty && isEmpty(v.Field(i)) {
continue
}
if i > 0 {
buf.WriteByte(',')
}
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
}
if name == "" {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
case reflect.Slice, reflect.Array:
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
}
buf.WriteByte(']')
return buf.String()
case reflect.Map:
buf.WriteByte('{')
// This does not sort the map keys, for best perf.
it := v.MapRange()
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
}
// If a map key supports TextMarshaler, use it.
keystr := ""
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
txt, err := m.MarshalText()
if err != nil {
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
} else {
keystr = string(txt)
}
keystr = prettyString(keystr)
} else {
// prettyWithFlags will produce already-escaped values
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
if t.Key().Kind() != reflect.String {
// JSON only does string keys. Unlike Go's standard JSON, we'll
// convert just about anything to a string.
keystr = prettyString(keystr)
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
buf.WriteByte('}')
return buf.String()
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return "null"
}
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
}
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
func prettyString(s string) string {
// Avoid escaping (which does allocations) if we can.
if needsEscape(s) {
return strconv.Quote(s)
}
b := bytes.NewBuffer(make([]byte, 0, 1024))
b.WriteByte('"')
b.WriteString(s)
b.WriteByte('"')
return b.String()
}
// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
for _, r := range s {
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
return true
}
}
return false
}
func isEmpty(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return m.MarshalLog()
}
func invokeStringer(s fmt.Stringer) (ret string) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return s.String()
}
func invokeError(e error) (ret string) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return e.Error()
}
// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
// File is the basename of the file for this call site.
File string `json:"file"`
// Line is the line number in the file for this call site.
Line int `json:"line"`
// Func is the function name for this call site, or empty if
// Options.LogCallerFunc is not enabled.
Func string `json:"function,omitempty"`
}
func (f Formatter) caller() Caller {
// +1 for this frame, +1 for Info/Error.
pc, file, line, ok := runtime.Caller(f.depth + 2)
if !ok {
return Caller{"<unknown>", 0, ""}
}
fn := ""
if f.opts.LogCallerFunc {
if fp := runtime.FuncForPC(pc); fp != nil {
fn = fp.Name()
}
}
return Caller{filepath.Base(file), line, fn}
}
const noValue = "<no-value>"
func (f Formatter) nonStringKey(v interface{}) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
const snipLen = 16
snip := f.pretty(v)
if len(snip) > snipLen {
snip = snip[:snipLen]
}
return snip
}
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
_, ok := kvList[i].(string)
if !ok {
kvList[i] = f.nonStringKey(kvList[i])
}
}
return kvList
}
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
f.depth += info.CallDepth
}
// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
return level <= f.opts.Verbosity
}
// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
return f.depth
}
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
return prefix, f.render(args, kvList)
}
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Error {
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
var loggableErr interface{}
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
return f.prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
if len(f.prefix) > 0 {
f.prefix += "/"
}
f.prefix += name
}
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)
vals := f.values
if hook := f.opts.RenderValuesHook; hook != nil {
vals = hook(f.sanitize(vals))
}
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
f.flatten(buf, vals, false, true) // escape user-provided keys
f.valuesStr = buf.String()
}
// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
f.depth += depth
}

View File

@ -4,5 +4,6 @@ Vault API
This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server.
For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo.
For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs).
[![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api)

View File

@ -52,6 +52,7 @@ const (
EnvRateLimit = "VAULT_RATE_LIMIT" EnvRateLimit = "VAULT_RATE_LIMIT"
EnvHTTPProxy = "VAULT_HTTP_PROXY" EnvHTTPProxy = "VAULT_HTTP_PROXY"
EnvVaultProxyAddr = "VAULT_PROXY_ADDR" EnvVaultProxyAddr = "VAULT_PROXY_ADDR"
EnvVaultDisableRedirects = "VAULT_DISABLE_REDIRECTS"
HeaderIndex = "X-Vault-Index" HeaderIndex = "X-Vault-Index"
HeaderForward = "X-Vault-Forward" HeaderForward = "X-Vault-Forward"
HeaderInconsistent = "X-Vault-Inconsistent" HeaderInconsistent = "X-Vault-Inconsistent"
@ -176,6 +177,16 @@ type Config struct {
// since there will be a performance penalty paid upon each request. // since there will be a performance penalty paid upon each request.
// This feature requires Enterprise server-side. // This feature requires Enterprise server-side.
ReadYourWrites bool ReadYourWrites bool
// DisableRedirects when set to true, will prevent the client from
// automatically following a (single) redirect response to its initial
// request. This behavior may be desirable if using Vault CLI on the server
// side.
//
// Note: Disabling redirect following behavior could cause issues with
// commands such as 'vault operator raft snapshot' as this redirects to the
// primary node.
DisableRedirects bool
} }
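A short, editor-added usage sketch for the new DisableRedirects option (not part of the diff):

	cfg := api.DefaultConfig()
	cfg.DisableRedirects = true // or: export VAULT_DISABLE_REDIRECTS=true
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client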
// TLSConfig contains the parameters needed to configure TLS on the HTTP client // TLSConfig contains the parameters needed to configure TLS on the HTTP client
@ -340,6 +351,7 @@ func (c *Config) ReadEnvironment() error {
var envSRVLookup bool var envSRVLookup bool
var limit *rate.Limiter var limit *rate.Limiter
var envVaultProxy string var envVaultProxy string
var envVaultDisableRedirects bool
// Parse the environment variables // Parse the environment variables
if v := os.Getenv(EnvVaultAddress); v != "" { if v := os.Getenv(EnvVaultAddress); v != "" {
@ -347,8 +359,6 @@ func (c *Config) ReadEnvironment() error {
} }
if v := os.Getenv(EnvVaultAgentAddr); v != "" { if v := os.Getenv(EnvVaultAgentAddr); v != "" {
envAgentAddress = v envAgentAddress = v
} else if v := os.Getenv(EnvVaultAgentAddress); v != "" {
envAgentAddress = v
} }
if v := os.Getenv(EnvVaultMaxRetries); v != "" { if v := os.Getenv(EnvVaultMaxRetries); v != "" {
maxRetries, err := strconv.ParseUint(v, 10, 32) maxRetries, err := strconv.ParseUint(v, 10, 32)
@ -390,13 +400,7 @@ func (c *Config) ReadEnvironment() error {
var err error var err error
envInsecure, err = strconv.ParseBool(v) envInsecure, err = strconv.ParseBool(v)
if err != nil { if err != nil {
return fmt.Errorf("could not parse VAULT_SKIP_VERIFY") return fmt.Errorf("could not parse %s", EnvVaultSkipVerify)
}
} else if v := os.Getenv(EnvVaultInsecure); v != "" {
var err error
envInsecure, err = strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("could not parse VAULT_INSECURE")
} }
} }
if v := os.Getenv(EnvVaultSRVLookup); v != "" { if v := os.Getenv(EnvVaultSRVLookup); v != "" {
@ -420,6 +424,16 @@ func (c *Config) ReadEnvironment() error {
envVaultProxy = v envVaultProxy = v
} }
if v := os.Getenv(EnvVaultDisableRedirects); v != "" {
var err error
envVaultDisableRedirects, err = strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("could not parse %s", EnvVaultDisableRedirects)
}
c.DisableRedirects = envVaultDisableRedirects
}
// Configure the HTTP clients TLS configuration. // Configure the HTTP clients TLS configuration.
t := &TLSConfig{ t := &TLSConfig{
CACert: envCACert, CACert: envCACert,
@ -470,6 +484,51 @@ func (c *Config) ReadEnvironment() error {
return nil return nil
} }
// ParseAddress transforms the provided address into a url.URL and handles
// the case of Unix domain sockets by setting the DialContext in the
// configuration's HttpClient.Transport. This function must be called with
// c.modifyLock held for write access.
func (c *Config) ParseAddress(address string) (*url.URL, error) {
u, err := url.Parse(address)
if err != nil {
return nil, err
}
c.Address = address
if strings.HasPrefix(address, "unix://") {
// When the address begins with unix://, always change the transport's
// DialContext (to match previous behaviour)
socket := strings.TrimPrefix(address, "unix://")
if transport, ok := c.HttpClient.Transport.(*http.Transport); ok {
transport.DialContext = func(context.Context, string, string) (net.Conn, error) {
return net.Dial("unix", socket)
}
// Since the address points to a unix domain socket, the scheme in the
// *URL would be set to `unix`. The *URL in the client is expected to
// be pointing to the protocol used in the application layer and not to
// the transport layer. Hence, setting the fields accordingly.
u.Scheme = "http"
u.Host = socket
u.Path = ""
} else {
return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport")
}
} else if strings.HasPrefix(c.Address, "unix://") {
// When the address being set does not begin with unix:// but the previous
// address in the Config did, change the transport's DialContext back to
// use the default configuration that cleanhttp uses.
if transport, ok := c.HttpClient.Transport.(*http.Transport); ok {
transport.DialContext = cleanhttp.DefaultPooledTransport().DialContext
}
}
return u, nil
}
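An editor-added sketch of the unix:// handling described above (the socket path is a placeholder):

	cfg := api.DefaultConfig()
	cfg.Address = "unix:///var/run/vault.sock"
	client, err := api.NewClient(cfg) // NewClient now routes the address through ParseAddress
	if err != nil {
		log.Fatal(err)
	}
	_ = client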
func parseRateLimit(val string) (rate float64, burst int, err error) { func parseRateLimit(val string) (rate float64, burst int, err error) {
_, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst)
if err != nil { if err != nil {
@ -542,27 +601,11 @@ func NewClient(c *Config) (*Client, error) {
address = c.AgentAddress address = c.AgentAddress
} }
u, err := url.Parse(address) u, err := c.ParseAddress(address)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if strings.HasPrefix(address, "unix://") {
socket := strings.TrimPrefix(address, "unix://")
transport := c.HttpClient.Transport.(*http.Transport)
transport.DialContext = func(context.Context, string, string) (net.Conn, error) {
return net.Dial("unix", socket)
}
// Since the address points to a unix domain socket, the scheme in the
// *URL would be set to `unix`. The *URL in the client is expected to
// be pointing to the protocol used in the application layer and not to
// the transport layer. Hence, setting the fields accordingly.
u.Scheme = "http"
u.Host = socket
u.Path = ""
}
client := &Client{ client := &Client{
addr: u, addr: u,
config: c, config: c,
@ -621,14 +664,11 @@ func (c *Client) SetAddress(addr string) error {
c.modifyLock.Lock() c.modifyLock.Lock()
defer c.modifyLock.Unlock() defer c.modifyLock.Unlock()
parsedAddr, err := url.Parse(addr) parsedAddr, err := c.config.ParseAddress(addr)
if err != nil { if err != nil {
return errwrap.Wrapf("failed to set address: {{err}}", err) return errwrap.Wrapf("failed to set address: {{err}}", err)
} }
c.config.modifyLock.Lock()
c.config.Address = addr
c.config.modifyLock.Unlock()
c.addr = parsedAddr c.addr = parsedAddr
return nil return nil
} }
@ -720,6 +760,42 @@ func (c *Client) SetMaxRetries(retries int) {
c.config.MaxRetries = retries c.config.MaxRetries = retries
} }
func (c *Client) SetMaxIdleConnections(idle int) {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns = idle
}
func (c *Client) MaxIdleConnections() int {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
return c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns
}
func (c *Client) SetDisableKeepAlives(disable bool) {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives = disable
}
func (c *Client) DisableKeepAlives() bool {
c.modifyLock.RLock()
defer c.modifyLock.RUnlock()
c.config.modifyLock.RLock()
defer c.config.modifyLock.RUnlock()
return c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives
}
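An editor-added sketch of the new transport-tuning helpers introduced above:

	client.SetMaxIdleConnections(4)
	client.SetDisableKeepAlives(true)
	fmt.Println(client.MaxIdleConnections(), client.DisableKeepAlives())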
func (c *Client) MaxRetries() int { func (c *Client) MaxRetries() int {
c.modifyLock.RLock() c.modifyLock.RLock()
defer c.modifyLock.RUnlock() defer c.modifyLock.RUnlock()
@ -1216,6 +1292,7 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon
outputCurlString := c.config.OutputCurlString outputCurlString := c.config.OutputCurlString
outputPolicy := c.config.OutputPolicy outputPolicy := c.config.OutputPolicy
logger := c.config.Logger logger := c.config.Logger
disableRedirects := c.config.DisableRedirects
c.config.modifyLock.RUnlock() c.config.modifyLock.RUnlock()
c.modifyLock.RUnlock() c.modifyLock.RUnlock()
@ -1309,8 +1386,8 @@ START:
return result, err return result, err
} }
// Check for a redirect, only allowing for a single redirect // Check for a redirect, only allowing for a single redirect (if redirects aren't disabled)
if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 { if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 && !disableRedirects {
// Parse the updated location // Parse the updated location
respLoc, err := resp.Location() respLoc, err := resp.Location()
if err != nil { if err != nil {
@ -1369,6 +1446,7 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo
httpClient := c.config.HttpClient httpClient := c.config.HttpClient
outputCurlString := c.config.OutputCurlString outputCurlString := c.config.OutputCurlString
outputPolicy := c.config.OutputPolicy outputPolicy := c.config.OutputPolicy
disableRedirects := c.config.DisableRedirects
// add headers // add headers
if c.headers != nil { if c.headers != nil {
@ -1441,8 +1519,8 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo
return result, err return result, err
} }
// Check for a redirect, only allowing for a single redirect // Check for a redirect, only allowing for a single redirect, if redirects aren't disabled
if resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307 { if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && !disableRedirects {
// Parse the updated location // Parse the updated location
respLoc, err := resp.Location() respLoc, err := resp.Location()
if err != nil { if err != nil {

View File

@ -1,5 +1,11 @@
package api package api
import "errors"
// ErrSecretNotFound is returned by KVv1 and KVv2 wrappers to indicate that the
// secret is missing at the given location.
var ErrSecretNotFound = errors.New("secret not found")
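Because the KV wrappers now wrap this sentinel with %w, callers can test for it with errors.Is. An editor-added sketch (mount and secret paths are placeholders):

	secret, err := client.KVv2("secret").Get(ctx, "my-app/creds")
	if errors.Is(err, api.ErrSecretNotFound) {
		// treat as "not created yet" rather than a hard failure
	} else if err != nil {
		return err
	}
	_ = secret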
// A KVSecret is a key-value secret returned by Vault's KV secrets engine, // A KVSecret is a key-value secret returned by Vault's KV secrets engine,
// and is the most basic type of secret stored in Vault. // and is the most basic type of secret stored in Vault.
// //

View File

@ -19,7 +19,7 @@ func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err)
} }
if secret == nil { if secret == nil {
return nil, fmt.Errorf("no secret found at %s", pathToRead) return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead)
} }
return &KVSecret{ return &KVSecret{

View File

@ -2,7 +2,9 @@ package api
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net/http"
"sort" "sort"
"strconv" "strconv"
"time" "time"
@ -115,7 +117,7 @@ func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err)
} }
if secret == nil { if secret == nil {
return nil, fmt.Errorf("no secret found at %s", pathToRead) return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead)
} }
kvSecret, err := extractDataAndVersionMetadata(secret) kvSecret, err := extractDataAndVersionMetadata(secret)
@ -123,11 +125,7 @@ func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
} }
cm, err := extractCustomMetadata(secret) kvSecret.CustomMetadata = extractCustomMetadata(secret)
if err != nil {
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err)
}
kvSecret.CustomMetadata = cm
return kvSecret, nil return kvSecret, nil
} }
@ -149,7 +147,7 @@ func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int)
return nil, err return nil, err
} }
if secret == nil { if secret == nil {
return nil, fmt.Errorf("no secret with version %d found at %s", version, pathToRead) return nil, fmt.Errorf("%w: for version %d at %s", ErrSecretNotFound, version, pathToRead)
} }
kvSecret, err := extractDataAndVersionMetadata(secret) kvSecret, err := extractDataAndVersionMetadata(secret)
@ -157,11 +155,7 @@ func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int)
return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
} }
cm, err := extractCustomMetadata(secret) kvSecret.CustomMetadata = extractCustomMetadata(secret)
if err != nil {
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err)
}
kvSecret.CustomMetadata = cm
return kvSecret, nil return kvSecret, nil
} }
@ -175,7 +169,7 @@ func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVV
return nil, err return nil, err
} }
if secret == nil || secret.Data == nil { if secret == nil || secret.Data == nil {
return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead)
} }
md, err := extractFullMetadata(secret) md, err := extractFullMetadata(secret)
@ -202,7 +196,7 @@ func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata
return nil, err return nil, err
} }
if secret == nil || secret.Data == nil { if secret == nil || secret.Data == nil {
return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead)
} }
md, err := extractFullMetadata(secret) md, err := extractFullMetadata(secret)
@ -244,7 +238,7 @@ func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]inte
return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err)
} }
if secret == nil { if secret == nil {
return nil, fmt.Errorf("no secret was written to %s", pathToWriteTo) return nil, fmt.Errorf("%w: after writing to %s", ErrSecretNotFound, pathToWriteTo)
} }
metadata, err := extractVersionMetadata(secret) metadata, err := extractVersionMetadata(secret)
@ -258,11 +252,7 @@ func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]inte
Raw: secret, Raw: secret,
} }
cm, err := extractCustomMetadata(secret) kvSecret.CustomMetadata = extractCustomMetadata(secret)
if err != nil {
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToWriteTo, err)
}
kvSecret.CustomMetadata = cm
return kvSecret, nil return kvSecret, nil
} }
@ -325,19 +315,19 @@ func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string
// Determine which kind of patch to use, // Determine which kind of patch to use,
// the newer HTTP Patch style or the older read-then-write style // the newer HTTP Patch style or the older read-then-write style
var kvs *KVSecret var kvs *KVSecret
var perr error var err error
switch patchMethod { switch patchMethod {
case "rw": case "rw":
kvs, perr = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData) kvs, err = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData)
case "patch": case "patch":
kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
case "": case "":
kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
default: default:
return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"") return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"")
} }
if perr != nil { if err != nil {
return nil, fmt.Errorf("unable to perform patch: %w", perr) return nil, fmt.Errorf("unable to perform patch: %w", err)
} }
if kvs == nil { if kvs == nil {
return nil, fmt.Errorf("no secret was written to %s", secretPath) return nil, fmt.Errorf("no secret was written to %s", secretPath)
@ -478,7 +468,7 @@ func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int)
// Now run it again and read the version we want to roll back to // Now run it again and read the version we want to roll back to
rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion) rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to get previous version %d of secret: %s", toVersion, err) return nil, fmt.Errorf("unable to get previous version %d of secret: %w", toVersion, err)
} }
err = validateRollbackVersion(rollbackVersion) err = validateRollbackVersion(rollbackVersion)
@ -495,30 +485,24 @@ func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int)
return kvs, nil return kvs, nil
} }
func extractCustomMetadata(secret *Secret) (map[string]interface{}, error) { func extractCustomMetadata(secret *Secret) map[string]interface{} {
// Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
customMetadataInterface, ok := secret.Data["custom_metadata"] customMetadataInterface, ok := secret.Data["custom_metadata"]
if !ok { if !ok {
metadataInterface, ok := secret.Data["metadata"] metadataInterface := secret.Data["metadata"]
if !ok { // if that's not found, bail since it should have had one or the other
return nil, fmt.Errorf("secret is missing expected fields")
}
metadataMap, ok := metadataInterface.(map[string]interface{}) metadataMap, ok := metadataInterface.(map[string]interface{})
if !ok { if !ok {
return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) return nil
}
customMetadataInterface, ok = metadataMap["custom_metadata"]
if !ok {
return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\": %v", metadataMap)
} }
customMetadataInterface = metadataMap["custom_metadata"]
} }
cm, ok := customMetadataInterface.(map[string]interface{}) cm, ok := customMetadataInterface.(map[string]interface{})
if !ok && customMetadataInterface != nil { if !ok {
return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", customMetadataInterface, customMetadataInterface) return nil
} }
return cm, nil return cm
} }
func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) { func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) {
@ -687,18 +671,28 @@ func mergePatch(ctx context.Context, client *Client, mountPath string, secretPat
secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData)
if err != nil { if err != nil {
var re *ResponseError
if errors.As(err, &re) {
switch re.StatusCode {
// 403
case http.StatusForbidden:
return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err)
// 404
case http.StatusNotFound:
return nil, fmt.Errorf("%w: performing merge patch to %s", ErrSecretNotFound, pathToMergePatch)
// 405
case http.StatusMethodNotAllowed:
// If it's a 405, that probably means the server is running a pre-1.9 // If it's a 405, that probably means the server is running a pre-1.9
// Vault version that doesn't support the HTTP PATCH method. // Vault version that doesn't support the HTTP PATCH method.
// Fall back to the old way of doing it. // Fall back to the old way of doing it.
if re, ok := err.(*ResponseError); ok && re.StatusCode == 405 {
return readThenWrite(ctx, client, mountPath, secretPath, newData) return readThenWrite(ctx, client, mountPath, secretPath, newData)
} }
if re, ok := err.(*ResponseError); ok && re.StatusCode == 403 {
return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err)
} }
return nil, fmt.Errorf("error performing merge patch to %s: %s", pathToMergePatch, err) return nil, fmt.Errorf("error performing merge patch to %s: %w", pathToMergePatch, err)
} }
metadata, err := extractVersionMetadata(secret) metadata, err := extractVersionMetadata(secret)
@ -712,11 +706,7 @@ func mergePatch(ctx context.Context, client *Client, mountPath string, secretPat
Raw: secret, Raw: secret,
} }
cm, err := extractCustomMetadata(secret) kvSecret.CustomMetadata = extractCustomMetadata(secret)
if err != nil {
return nil, fmt.Errorf("error reading custom metadata for secret %s: %w", secretPath, err)
}
kvSecret.CustomMetadata = cm
return kvSecret, nil return kvSecret, nil
} }
@ -730,7 +720,7 @@ func readThenWrite(ctx context.Context, client *Client, mountPath string, secret
// Make sure the secret already exists // Make sure the secret already exists
if existingVersion == nil || existingVersion.Data == nil { if existingVersion == nil || existingVersion.Data == nil {
return nil, fmt.Errorf("no existing secret was found at %s when doing read-then-write patch operation: %w", secretPath, err) return nil, fmt.Errorf("%w: at %s as part of read-then-write patch operation", ErrSecretNotFound, secretPath)
} }
// Verify existing secret has metadata // Verify existing secret has metadata

View File

@ -69,7 +69,6 @@ const (
// } // }
// } // }
// //
//
// `DoneCh` will return if renewal fails, or if the remaining lease duration is // `DoneCh` will return if renewal fails, or if the remaining lease duration is
// under a built-in threshold and either renewing is not extending it or // under a built-in threshold and either renewing is not extending it or
// renewing is disabled. In both cases, the caller should attempt a re-read of // renewing is disabled. In both cases, the caller should attempt a re-read of
@ -251,7 +250,8 @@ func (r *LifetimeWatcher) doRenew() error {
} }
func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string,
renew renewFunc, initialRetryInterval time.Duration) error { renew renewFunc, initialRetryInterval time.Duration,
) error {
if credString == "" || if credString == "" ||
(nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) {
return r.errLifetimeWatcherNotRenewable return r.errLifetimeWatcherNotRenewable

View File

@ -60,19 +60,19 @@ func (d *OutputStringError) buildCurlString() (string, error) {
finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method)
} }
if d.ClientCACert != "" { if d.ClientCACert != "" {
clientCACert := strings.Replace(d.ClientCACert, "'", "'\"'\"'", -1) clientCACert := strings.ReplaceAll(d.ClientCACert, "'", "'\"'\"'")
finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert)
} }
if d.ClientCAPath != "" { if d.ClientCAPath != "" {
clientCAPath := strings.Replace(d.ClientCAPath, "'", "'\"'\"'", -1) clientCAPath := strings.ReplaceAll(d.ClientCAPath, "'", "'\"'\"'")
finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath)
} }
if d.ClientCert != "" { if d.ClientCert != "" {
clientCert := strings.Replace(d.ClientCert, "'", "'\"'\"'", -1) clientCert := strings.ReplaceAll(d.ClientCert, "'", "'\"'\"'")
finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert) finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert)
} }
if d.ClientKey != "" { if d.ClientKey != "" {
clientKey := strings.Replace(d.ClientKey, "'", "'\"'\"'", -1) clientKey := strings.ReplaceAll(d.ClientKey, "'", "'\"'\"'")
finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey)
} }
for k, v := range d.Request.Header { for k, v := range d.Request.Header {
@ -87,7 +87,7 @@ func (d *OutputStringError) buildCurlString() (string, error) {
if len(body) > 0 { if len(body) > 0 {
// We need to escape single quotes since that's what we're using to // We need to escape single quotes since that's what we're using to
// quote the body // quote the body
escapedBody := strings.Replace(string(body), "'", "'\"'\"'", -1) escapedBody := strings.ReplaceAll(string(body), "'", "'\"'\"'")
finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody)
} }

View File

@ -16,7 +16,11 @@ import (
"github.com/hashicorp/errwrap" "github.com/hashicorp/errwrap"
) )
var ( const (
// PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override
// setting a TLSProviderFunc for a plugin.
PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED"
// PluginMetadataModeEnv is an ENV name used to disable TLS communication // PluginMetadataModeEnv is an ENV name used to disable TLS communication
// to bootstrap mounting plugins. // to bootstrap mounting plugins.
PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE"
@ -24,14 +28,15 @@ var (
// PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the
// plugin. // plugin.
PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN"
)
// sudoPaths is a map containing the paths that require a token's policy // sudoPaths is a map containing the paths that require a token's policy
// to have the "sudo" capability. The keys are the paths as strings, in // to have the "sudo" capability. The keys are the paths as strings, in
// the same format as they are returned by the OpenAPI spec. The values // the same format as they are returned by the OpenAPI spec. The values
// are the regular expressions that can be used to test whether a given // are the regular expressions that can be used to test whether a given
// path matches that path or not (useful specifically for the paths that // path matches that path or not (useful specifically for the paths that
// contain templated fields.) // contain templated fields.)
sudoPaths = map[string]*regexp.Regexp{ var sudoPaths = map[string]*regexp.Regexp{
"/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`),
"/pki/root": regexp.MustCompile(`^/pki/root$`), "/pki/root": regexp.MustCompile(`^/pki/root$`),
"/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`),
@ -66,8 +71,7 @@ var (
"/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`),
"/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`),
"/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`),
} }
)
// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections // PluginAPIClientMeta is a helper that plugins can use to configure TLS connections
// back to Vault. // back to Vault.
@ -120,7 +124,7 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error)
// VaultPluginTLSProviderContext is run inside a plugin and retrieves the response // VaultPluginTLSProviderContext is run inside a plugin and retrieves the response
// wrapped TLS certificate from vault. It returns a configured TLS Config. // wrapped TLS certificate from vault. It returns a configured TLS Config.
func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) {
if os.Getenv(PluginMetadataModeEnv) == "true" { if os.Getenv(PluginAutoMTLSEnv) == "true" || os.Getenv(PluginMetadataModeEnv) == "true" {
return nil return nil
} }

View File

@ -85,11 +85,10 @@ func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.
} }
// Returns true if any of the following conditions are true: // Returns true if any of the following conditions are true:
// * CA cert is configured // - CA cert is configured
// * CA path is configured // - CA path is configured
// * configured to skip certificate verification // - configured to skip certificate verification
// * TLS server name is configured // - TLS server name is configured
//
func (c *SSHHelperConfig) shouldSetTLSParameters() bool { func (c *SSHHelperConfig) shouldSetTLSParameters() bool {
return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify
} }

View File

@ -87,7 +87,8 @@ func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, erro
// DEPRECATED: Use EnableAuditWithOptions instead // DEPRECATED: Use EnableAuditWithOptions instead
func (c *Sys) EnableAudit( func (c *Sys) EnableAudit(
path string, auditType string, desc string, opts map[string]string) error { path string, auditType string, desc string, opts map[string]string,
) error {
return c.EnableAuditWithOptions(path, &EnableAuditOptions{ return c.EnableAuditWithOptions(path, &EnableAuditOptions{
Type: auditType, Type: auditType,
Description: desc, Description: desc,

View File

@ -266,6 +266,7 @@ type MountConfigInput struct {
AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"`
TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` TokenType string `json:"token_type,omitempty" mapstructure:"token_type"`
AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"`
PluginVersion string `json:"plugin_version,omitempty"`
// Deprecated: This field will always be blank for newer server responses. // Deprecated: This field will always be blank for newer server responses.
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
@ -281,6 +282,10 @@ type MountOutput struct {
Local bool `json:"local"` Local bool `json:"local"`
SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"`
PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"`
RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"`
RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"`
DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"`
} }
type MountConfigOutput struct { type MountConfigOutput struct {

View File

@ -22,6 +22,8 @@ type ListPluginsResponse struct {
// PluginsByType is the list of plugins by type. // PluginsByType is the list of plugins by type.
PluginsByType map[consts.PluginType][]string `json:"types"` PluginsByType map[consts.PluginType][]string `json:"types"`
Details []PluginDetails `json:"details,omitempty"`
// Names is the list of names of the plugins. // Names is the list of names of the plugins.
// //
// Deprecated: Newer server responses should be returning PluginsByType (json: // Deprecated: Newer server responses should be returning PluginsByType (json:
@ -29,6 +31,14 @@ type ListPluginsResponse struct {
Names []string `json:"names"` Names []string `json:"names"`
} }
type PluginDetails struct {
Type string `json:"type"`
Name string `json:"name"`
Version string `json:"version,omitempty"`
Builtin bool `json:"builtin"`
DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"`
}
// ListPlugins wraps ListPluginsWithContext using context.Background. // ListPlugins wraps ListPluginsWithContext using context.Background.
func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) {
return c.ListPluginsWithContext(context.Background(), i) return c.ListPluginsWithContext(context.Background(), i)
@ -40,25 +50,7 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
defer cancelFunc() defer cancelFunc()
path := "" resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/catalog"))
method := ""
if i.Type == consts.PluginTypeUnknown {
path = "/v1/sys/plugins/catalog"
method = http.MethodGet
} else {
path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Type)
method = "LIST"
}
req := c.c.NewRequest(method, path)
if method == "LIST" {
// Set this for broader compatibility, but we use LIST above to be able
// to handle the wrapping lookup function
req.Method = http.MethodGet
req.Params.Set("list", "true")
}
resp, err := c.c.rawRequestWithContext(ctx, req)
if err != nil && resp == nil { if err != nil && resp == nil {
return nil, err return nil, err
} }
@ -67,27 +59,6 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
} }
defer resp.Body.Close() defer resp.Body.Close()
// We received an Unsupported Operation response from Vault, indicating
// Vault of an older version that doesn't support the GET method yet;
// switch it to a LIST.
if resp.StatusCode == 405 {
req.Params.Set("list", "true")
resp, err := c.c.rawRequestWithContext(ctx, req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result struct {
Data struct {
Keys []string `json:"keys"`
} `json:"data"`
}
if err := resp.DecodeJSON(&result); err != nil {
return nil, err
}
return &ListPluginsResponse{Names: result.Data.Keys}, nil
}
secret, err := ParseSecret(resp.Body) secret, err := ParseSecret(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
@ -99,7 +70,8 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
result := &ListPluginsResponse{ result := &ListPluginsResponse{
PluginsByType: make(map[consts.PluginType][]string), PluginsByType: make(map[consts.PluginType][]string),
} }
if i.Type == consts.PluginTypeUnknown { switch i.Type {
case consts.PluginTypeUnknown:
for _, pluginType := range consts.PluginTypes { for _, pluginType := range consts.PluginTypes {
pluginsRaw, ok := secret.Data[pluginType.String()] pluginsRaw, ok := secret.Data[pluginType.String()]
if !ok { if !ok {
@ -121,14 +93,38 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (
} }
result.PluginsByType[pluginType] = plugins result.PluginsByType[pluginType] = plugins
} }
} else { default:
pluginsRaw, ok := secret.Data[i.Type.String()]
if !ok {
return nil, fmt.Errorf("no %s entry in returned data", i.Type.String())
}
var respKeys []string var respKeys []string
if err := mapstructure.Decode(secret.Data["keys"], &respKeys); err != nil { if err := mapstructure.Decode(pluginsRaw, &respKeys); err != nil {
return nil, err return nil, err
} }
result.PluginsByType[i.Type] = respKeys result.PluginsByType[i.Type] = respKeys
} }
if detailed, ok := secret.Data["detailed"]; ok {
var details []PluginDetails
if err := mapstructure.Decode(detailed, &details); err != nil {
return nil, err
}
switch i.Type {
case consts.PluginTypeUnknown:
result.Details = details
default:
// Filter for just the queried type.
for _, entry := range details {
if entry.Type == i.Type.String() {
result.Details = append(result.Details, entry)
}
}
}
}
return result, nil return result, nil
} }
@ -138,6 +134,7 @@ type GetPluginInput struct {
// Type of the plugin. Required. // Type of the plugin. Required.
Type consts.PluginType `json:"type"` Type consts.PluginType `json:"type"`
Version string `json:"version"`
} }
// GetPluginResponse is the response from the GetPlugin call. // GetPluginResponse is the response from the GetPlugin call.
@ -147,6 +144,8 @@ type GetPluginResponse struct {
Command string `json:"command"` Command string `json:"command"`
Name string `json:"name"` Name string `json:"name"`
SHA256 string `json:"sha256"` SHA256 string `json:"sha256"`
DeprecationStatus string `json:"deprecation_status,omitempty"`
Version string `json:"version,omitempty"`
} }
// GetPlugin wraps GetPluginWithContext using context.Background. // GetPlugin wraps GetPluginWithContext using context.Background.
@ -161,6 +160,9 @@ func (c *Sys) GetPluginWithContext(ctx context.Context, i *GetPluginInput) (*Get
path := catalogPathByType(i.Type, i.Name) path := catalogPathByType(i.Type, i.Name)
req := c.c.NewRequest(http.MethodGet, path) req := c.c.NewRequest(http.MethodGet, path)
if i.Version != "" {
req.Params.Set("version", i.Version)
}
resp, err := c.c.rawRequestWithContext(ctx, req) resp, err := c.c.rawRequestWithContext(ctx, req)
if err != nil { if err != nil {
@ -194,6 +196,9 @@ type RegisterPluginInput struct {
// SHA256 is the shasum of the plugin. // SHA256 is the shasum of the plugin.
SHA256 string `json:"sha256,omitempty"` SHA256 string `json:"sha256,omitempty"`
// Version is the optional version of the plugin being registered
Version string `json:"version,omitempty"`
} }
// RegisterPlugin wraps RegisterPluginWithContext using context.Background. // RegisterPlugin wraps RegisterPluginWithContext using context.Background.
@ -227,6 +232,9 @@ type DeregisterPluginInput struct {
// Type of the plugin. Required. // Type of the plugin. Required.
Type consts.PluginType `json:"type"` Type consts.PluginType `json:"type"`
// Version of the plugin. Optional.
Version string `json:"version,omitempty"`
} }
// DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. // DeregisterPlugin wraps DeregisterPluginWithContext using context.Background.
@ -242,7 +250,7 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug
path := catalogPathByType(i.Type, i.Name) path := catalogPathByType(i.Type, i.Name)
req := c.c.NewRequest(http.MethodDelete, path) req := c.c.NewRequest(http.MethodDelete, path)
req.Params.Set("version", i.Version)
resp, err := c.c.rawRequestWithContext(ctx, req) resp, err := c.c.rawRequestWithContext(ctx, req)
if err == nil { if err == nil {
defer resp.Body.Close() defer resp.Body.Close()

View File

@ -107,6 +107,8 @@ type SealStatusResponse struct {
ClusterID string `json:"cluster_id,omitempty"` ClusterID string `json:"cluster_id,omitempty"`
RecoverySeal bool `json:"recovery_seal"` RecoverySeal bool `json:"recovery_seal"`
StorageType string `json:"storage_type,omitempty"` StorageType string `json:"storage_type,omitempty"`
HCPLinkStatus string `json:"hcp_link_status,omitempty"`
HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"`
} }
type UnsealOpts struct { type UnsealOpts struct {

View File

@ -49,6 +49,26 @@ var expectedNISTPCurveHashBits = map[int]int{
521: 512, 521: 512,
} }
// Mapping of constant names<->constant values for SignatureAlgorithm
var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{
"sha256withrsa": x509.SHA256WithRSA,
"sha384withrsa": x509.SHA384WithRSA,
"sha512withrsa": x509.SHA512WithRSA,
"ecdsawithsha256": x509.ECDSAWithSHA256,
"ecdsawithsha384": x509.ECDSAWithSHA384,
"ecdsawithsha512": x509.ECDSAWithSHA512,
"sha256withrsapss": x509.SHA256WithRSAPSS,
"sha384withrsapss": x509.SHA384WithRSAPSS,
"sha512withrsapss": x509.SHA512WithRSAPSS,
"pureed25519": x509.PureEd25519,
"ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix.
}
// OID for RFC 5280 Delta CRL Indicator CRL extension.
//
// > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 }
var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27})
// GetHexFormatted returns the byte buffer formatted in hex with // GetHexFormatted returns the byte buffer formatted in hex with
// the specified separator between bytes. // the specified separator between bytes.
func GetHexFormatted(buf []byte, sep string) string { func GetHexFormatted(buf []byte, sep string) string {
@ -87,6 +107,16 @@ func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) {
return getSubjectKeyID(privateKey.Public()) return getSubjectKeyID(privateKey.Public())
} }
// Returns the explicit SKID when used for cross-signing, else computes a new
// SKID from the key itself.
func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) {
if len(data.Params.SKID) > 0 {
return data.Params.SKID, nil
}
return getSubjectKeyID(data.CSR.PublicKey)
}
func getSubjectKeyID(pub interface{}) ([]byte, error) { func getSubjectKeyID(pub interface{}) ([]byte, error) {
var publicKeyBytes []byte var publicKeyBytes []byte
switch pub := pub.(type) { switch pub := pub.(type) {
@ -151,18 +181,21 @@ func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) {
} }
func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) { func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) {
if signer, err = x509.ParseECPrivateKey(privateKeyBytes); err == nil { var firstError error
if signer, firstError = x509.ParseECPrivateKey(privateKeyBytes); firstError == nil {
format = ECBlock format = ECBlock
return return
} }
if signer, err = x509.ParsePKCS1PrivateKey(privateKeyBytes); err == nil { var secondError error
if signer, secondError = x509.ParsePKCS1PrivateKey(privateKeyBytes); secondError == nil {
format = PKCS1Block format = PKCS1Block
return return
} }
var thirdError error
var rawKey interface{} var rawKey interface{}
if rawKey, err = x509.ParsePKCS8PrivateKey(privateKeyBytes); err == nil { if rawKey, thirdError = x509.ParsePKCS8PrivateKey(privateKeyBytes); thirdError == nil {
switch rawSigner := rawKey.(type) { switch rawSigner := rawKey.(type) {
case *rsa.PrivateKey: case *rsa.PrivateKey:
signer = rawSigner signer = rawSigner
@ -178,7 +211,7 @@ func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType
return return
} }
return nil, UnknownBlock, err return nil, UnknownBlock, fmt.Errorf("got errors attempting to parse DER private key:\n1. %v\n2. %v\n3. %v", firstError, secondError, thirdError)
} }
func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) { func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) {
@ -756,6 +789,29 @@ func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reade
return createCertificate(data, randReader, keyGenerator) return createCertificate(data, randReader, keyGenerator)
} }
// Set the correct RSA signature algorithm (PSS or PKCS#1 v1.5) on the certificate template
func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) {
if data.Params.UsePSS {
switch data.Params.SignatureBits {
case 256:
certTemplate.SignatureAlgorithm = x509.SHA256WithRSAPSS
case 384:
certTemplate.SignatureAlgorithm = x509.SHA384WithRSAPSS
case 512:
certTemplate.SignatureAlgorithm = x509.SHA512WithRSAPSS
}
} else {
switch data.Params.SignatureBits {
case 256:
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
case 384:
certTemplate.SignatureAlgorithm = x509.SHA384WithRSA
case 512:
certTemplate.SignatureAlgorithm = x509.SHA512WithRSA
}
}
}
func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) { func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) {
var err error var err error
result := &ParsedCertBundle{} result := &ParsedCertBundle{}
@ -824,14 +880,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen
if data.SigningBundle != nil { if data.SigningBundle != nil {
switch data.SigningBundle.PrivateKeyType { switch data.SigningBundle.PrivateKeyType {
case RSAPrivateKey: case RSAPrivateKey:
switch data.Params.SignatureBits { certTemplateSetSigAlgo(certTemplate, data)
case 256:
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
case 384:
certTemplate.SignatureAlgorithm = x509.SHA384WithRSA
case 512:
certTemplate.SignatureAlgorithm = x509.SHA512WithRSA
}
case Ed25519PrivateKey: case Ed25519PrivateKey:
certTemplate.SignatureAlgorithm = x509.PureEd25519 certTemplate.SignatureAlgorithm = x509.PureEd25519
case ECPrivateKey: case ECPrivateKey:
@ -853,14 +902,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen
switch data.Params.KeyType { switch data.Params.KeyType {
case "rsa": case "rsa":
switch data.Params.SignatureBits { certTemplateSetSigAlgo(certTemplate, data)
case 256:
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
case 384:
certTemplate.SignatureAlgorithm = x509.SHA384WithRSA
case 512:
certTemplate.SignatureAlgorithm = x509.SHA512WithRSA
}
case "ed25519": case "ed25519":
certTemplate.SignatureAlgorithm = x509.PureEd25519 certTemplate.SignatureAlgorithm = x509.PureEd25519
case "ec": case "ec":
@ -1066,7 +1108,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun
return nil, err return nil, err
} }
subjKeyID, err := getSubjectKeyID(data.CSR.PublicKey) subjKeyID, err := getSubjectKeyIDFromBundle(data)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1087,14 +1129,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun
switch data.SigningBundle.PrivateKeyType { switch data.SigningBundle.PrivateKeyType {
case RSAPrivateKey: case RSAPrivateKey:
switch data.Params.SignatureBits { certTemplateSetSigAlgo(certTemplate, data)
case 256:
certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
case 384:
certTemplate.SignatureAlgorithm = x509.SHA384WithRSA
case 512:
certTemplate.SignatureAlgorithm = x509.SHA512WithRSA
}
case ECPrivateKey: case ECPrivateKey:
switch data.Params.SignatureBits { switch data.Params.SignatureBits {
case 256: case 256:
@ -1266,3 +1301,26 @@ func CreateKeyBundleWithKeyGenerator(keyType string, keyBits int, randReader io.
} }
return result, nil return result, nil
} }
// CreateDeltaCRLIndicatorExt allows creating correctly formed delta CRLs
// that point back to the last complete CRL that they're based on.
func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) {
bigNum := big.NewInt(completeCRLNumber)
bigNumValue, err := asn1.Marshal(bigNum)
if err != nil {
return pkix.Extension{}, fmt.Errorf("unable to marshal complete CRL number (%v): %v", completeCRLNumber, err)
}
return pkix.Extension{
Id: DeltaCRLIndicatorOID,
// > When a conforming CRL issuer generates a delta CRL, the delta
// > CRL MUST include a critical delta CRL indicator extension.
Critical: true,
// This extension only includes the complete CRL number:
//
// > BaseCRLNumber ::= CRLNumber
//
// But, this needs to be encoded as a big number for encoding/asn1
// to work properly.
Value: bigNumValue,
}, nil
}
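For illustration only (not part of this diff), the new helper could be wired into a delta CRL template roughly as follows; the function name, CRL numbers, and validity window are assumptions:

```go
package example

import (
	"crypto"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"time"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

// buildDeltaCRL signs a delta CRL whose delta CRL indicator extension points
// back at the complete CRL identified by baseCRLNumber.
func buildDeltaCRL(issuer *x509.Certificate, key crypto.Signer, baseCRLNumber int64) ([]byte, error) {
	ext, err := certutil.CreateDeltaCRLIndicatorExt(baseCRLNumber)
	if err != nil {
		return nil, err
	}
	tmpl := &x509.RevocationList{
		// The delta CRL still carries its own CRL number.
		Number:          big.NewInt(baseCRLNumber + 1),
		ThisUpdate:      time.Now(),
		NextUpdate:      time.Now().Add(24 * time.Hour),
		ExtraExtensions: []pkix.Extension{ext},
	}
	return x509.CreateRevocationList(rand.Reader, tmpl, issuer, key)
}
```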

View File

@ -710,6 +710,7 @@ type CAInfoBundle struct {
ParsedCertBundle ParsedCertBundle
URLs *URLEntries URLs *URLEntries
LeafNotAfterBehavior NotAfterBehavior LeafNotAfterBehavior NotAfterBehavior
RevocationSigAlg x509.SignatureAlgorithm
} }
func (b *CAInfoBundle) GetCAChain() []*CertBlock { func (b *CAInfoBundle) GetCAChain() []*CertBlock {
@ -782,6 +783,7 @@ type CreationParameters struct {
PolicyIdentifiers []string PolicyIdentifiers []string
BasicConstraintsValidForNonCA bool BasicConstraintsValidForNonCA bool
SignatureBits int SignatureBits int
UsePSS bool
ForceAppendCaChain bool ForceAppendCaChain bool
// Only used when signing a CA cert // Only used when signing a CA cert
@ -796,6 +798,9 @@ type CreationParameters struct {
// The duration the certificate will use NotBefore // The duration the certificate will use NotBefore
NotBeforeDuration time.Duration NotBeforeDuration time.Duration
// The explicit SKID to use; especially useful for cross-signing.
SKID []byte
} }
type CreationBundle struct { type CreationBundle struct {

View File

@ -0,0 +1,31 @@
package consts
const VaultAllowPendingRemovalMountsEnv = "VAULT_ALLOW_PENDING_REMOVAL_MOUNTS"
// DeprecationStatus represents the current deprecation state for builtins
type DeprecationStatus uint32
// These are the states of deprecation for builtin plugins
const (
Supported = iota
Deprecated
PendingRemoval
Removed
Unknown
)
// String returns the string representation of a builtin deprecation status
func (s DeprecationStatus) String() string {
switch s {
case Supported:
return "supported"
case Deprecated:
return "deprecated"
case PendingRemoval:
return "pending removal"
case Removed:
return "removed"
default:
return ""
}
}

View File

@ -25,7 +25,6 @@ type LockEntry struct {
// Lock B, Lock A // Lock B, Lock A
// //
// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A // Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A
//
func CreateLocks() []*LockEntry { func CreateLocks() []*LockEntry {
ret := make([]*LockEntry, LockCount) ret := make([]*LockEntry, LockCount)
for i := range ret { for i := range ret {

View File

@ -43,6 +43,7 @@ func NewVaultLogger(level log.Level) log.Logger {
func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger {
opts := &log.LoggerOptions{ opts := &log.LoggerOptions{
Level: level, Level: level,
IndependentLevels: true,
Output: w, Output: w,
JSONFormat: ParseEnvLogFormat() == JSONFormat, JSONFormat: ParseEnvLogFormat() == JSONFormat,
} }

View File

@ -7,7 +7,11 @@ import (
version "github.com/hashicorp/go-version" version "github.com/hashicorp/go-version"
) )
var ( const (
// PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override
// setting a TLSProviderFunc for a plugin.
PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED"
// PluginMlockEnabled is the ENV name used to pass the configuration for // PluginMlockEnabled is the ENV name used to pass the configuration for
// enabling mlock // enabling mlock
PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED"
@ -27,6 +31,10 @@ var (
// PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded
// string. Used for testing. // string. Used for testing.
PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM"
// PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names
// opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues
PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT"
) )
// OptionallyEnableMlock determines if mlock should be called, and if so enables // OptionallyEnableMlock determines if mlock should be called, and if so enables

View File

@ -1,12 +1,16 @@
package pluginutil package pluginutil
import ( import (
context "context" "context"
"fmt" "fmt"
"os"
"strings"
grpc "google.golang.org/grpc" "github.com/hashicorp/go-secure-stdlib/strutil"
codes "google.golang.org/grpc/codes" "google.golang.org/grpc"
status "google.golang.org/grpc/status" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
) )
type PluginMultiplexingServerImpl struct { type PluginMultiplexingServerImpl struct {
@ -15,17 +19,22 @@ type PluginMultiplexingServerImpl struct {
Supported bool Supported bool
} }
func (pm PluginMultiplexingServerImpl) MultiplexingSupport(ctx context.Context, req *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { func (pm PluginMultiplexingServerImpl) MultiplexingSupport(_ context.Context, _ *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) {
return &MultiplexingSupportResponse{ return &MultiplexingSupportResponse{
Supported: pm.Supported, Supported: pm.Supported,
}, nil }, nil
} }
func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface) (bool, error) { func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, name string) (bool, error) {
if cc == nil { if cc == nil {
return false, fmt.Errorf("client connection is nil") return false, fmt.Errorf("client connection is nil")
} }
out := strings.Split(os.Getenv(PluginMultiplexingOptOut), ",")
if strutil.StrListContains(out, name) {
return false, nil
}
req := new(MultiplexingSupportRequest) req := new(MultiplexingSupportRequest)
resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req) resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req)
if err != nil { if err != nil {
@ -45,3 +54,22 @@ func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface) (bo
return resp.Supported, nil return resp.Supported, nil
} }
func GetMultiplexIDFromContext(ctx context.Context) (string, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return "", fmt.Errorf("missing plugin multiplexing metadata")
}
multiplexIDs := md[MultiplexingCtxKey]
if len(multiplexIDs) != 1 {
return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs))
}
multiplexID := multiplexIDs[0]
if multiplexID == "" {
return "", fmt.Errorf("empty multiplex ID in metadata")
}
return multiplexID, nil
}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.27.1 // protoc-gen-go v1.28.1
// protoc v3.19.4 // protoc v3.21.5
// source: sdk/helper/pluginutil/multiplexing.proto // source: sdk/helper/pluginutil/multiplexing.proto
package pluginutil package pluginutil

View File

@ -16,12 +16,14 @@ import (
type PluginClientConfig struct { type PluginClientConfig struct {
Name string Name string
PluginType consts.PluginType PluginType consts.PluginType
Version string
PluginSets map[int]plugin.PluginSet PluginSets map[int]plugin.PluginSet
HandshakeConfig plugin.HandshakeConfig HandshakeConfig plugin.HandshakeConfig
Logger log.Logger Logger log.Logger
IsMetadataMode bool IsMetadataMode bool
AutoMTLS bool AutoMTLS bool
MLock bool MLock bool
Wrapper RunnerUtil
} }
type runConfig struct { type runConfig struct {
@ -33,8 +35,6 @@ type runConfig struct {
// Initialized with what's in PluginRunner.Env, but can be added to // Initialized with what's in PluginRunner.Env, but can be added to
env []string env []string
wrapper RunnerUtil
PluginClientConfig PluginClientConfig
} }
@ -43,7 +43,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
cmd.Env = append(cmd.Env, rc.env...) cmd.Env = append(cmd.Env, rc.env...)
// Add the mlock setting to the ENV of the plugin // Add the mlock setting to the ENV of the plugin
if rc.MLock || (rc.wrapper != nil && rc.wrapper.MlockEnabled()) { if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
} }
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version))
@ -54,6 +54,9 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode)
cmd.Env = append(cmd.Env, metadataEnv) cmd.Env = append(cmd.Env, metadataEnv)
automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS)
cmd.Env = append(cmd.Env, automtlsEnv)
var clientTLSConfig *tls.Config var clientTLSConfig *tls.Config
if !rc.AutoMTLS && !rc.IsMetadataMode { if !rc.AutoMTLS && !rc.IsMetadataMode {
// Get a CA TLS Certificate // Get a CA TLS Certificate
@ -70,7 +73,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
// Use CA to sign a server cert and wrap the values in a response wrapped // Use CA to sign a server cert and wrap the values in a response wrapped
// token. // token.
wrapToken, err := wrapServerConfig(ctx, rc.wrapper, certBytes, key) wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -120,7 +123,7 @@ func Env(env ...string) RunOpt {
func Runner(wrapper RunnerUtil) RunOpt { func Runner(wrapper RunnerUtil) RunOpt {
return func(rc *runConfig) { return func(rc *runConfig) {
rc.wrapper = wrapper rc.Wrapper = wrapper
} }
} }

View File

@ -5,7 +5,8 @@ import (
"time" "time"
log "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/go-plugin"
"github.com/hashicorp/go-version"
"github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/helper/wrapping"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -14,7 +15,8 @@ import (
// Looker defines the plugin Lookup function that looks into the plugin catalog // Looker defines the plugin Lookup function that looks into the plugin catalog
// for available plugins and returns a PluginRunner // for available plugins and returns a PluginRunner
type Looker interface { type Looker interface {
LookupPlugin(context.Context, string, consts.PluginType) (*PluginRunner, error) LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*PluginRunner, error)
LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*PluginRunner, error)
} }
// RunnerUtil interface defines the functions needed by the runner to wrap the // RunnerUtil interface defines the functions needed by the runner to wrap the
@ -35,6 +37,7 @@ type LookRunnerUtil interface {
type PluginClient interface { type PluginClient interface {
Conn() grpc.ClientConnInterface Conn() grpc.ClientConnInterface
Reload() error
plugin.ClientProtocol plugin.ClientProtocol
} }
@ -45,6 +48,7 @@ const MultiplexingCtxKey string = "multiplex_id"
type PluginRunner struct { type PluginRunner struct {
Name string `json:"name" structs:"name"` Name string `json:"name" structs:"name"`
Type consts.PluginType `json:"type" structs:"type"` Type consts.PluginType `json:"type" structs:"type"`
Version string `json:"version" structs:"version"`
Command string `json:"command" structs:"command"` Command string `json:"command" structs:"command"`
Args []string `json:"args" structs:"args"` Args []string `json:"args" structs:"args"`
Env []string `json:"env" structs:"env"` Env []string `json:"env" structs:"env"`
@ -81,6 +85,20 @@ func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil,
) )
} }
// VersionedPlugin holds any versioning information stored about a plugin in the
// plugin catalog.
type VersionedPlugin struct {
Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses.
Name string `json:"name"`
Version string `json:"version"`
SHA256 string `json:"sha256,omitempty"`
Builtin bool `json:"builtin"`
DeprecationStatus string `json:"deprecation_status,omitempty"`
// Pre-parsed semver struct of the Version field
SemanticVersion *version.Version `json:"-"`
}
// CtxCancelIfCanceled takes a context cancel func and a context. If the context is // CtxCancelIfCanceled takes a context cancel func and a context. If the context is
// shutdown the cancelfunc is called. This is useful for merging two cancel // shutdown the cancelfunc is called. This is useful for merging two cancel
// functions. // functions.

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.27.1 // protoc-gen-go v1.28.1
// protoc v3.19.4 // protoc v3.21.5
// source: sdk/logical/identity.proto // source: sdk/logical/identity.proto
package logical package logical

View File

@ -137,3 +137,20 @@ type Auditor interface {
AuditRequest(ctx context.Context, input *LogInput) error AuditRequest(ctx context.Context, input *LogInput) error
AuditResponse(ctx context.Context, input *LogInput) error AuditResponse(ctx context.Context, input *LogInput) error
} }
// Externaler allows us to check if a backend is running externally (i.e., over GRPC)
type Externaler interface {
IsExternal() bool
}
type PluginVersion struct {
Version string
}
// PluginVersioner is an optional interface to return version info.
type PluginVersioner interface {
// PluginVersion returns the version for the backend
PluginVersion() PluginVersion
}
var EmptyPluginVersion = PluginVersion{""}
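For illustration only (not part of this diff), a backend might opt in to the optional interface roughly like this; the type name and version string are assumptions:

```go
// versionedBackend is a hypothetical backend in the logical package's consumers
// that advertises its build version through the optional PluginVersioner interface.
type versionedBackend struct{}

func (b *versionedBackend) PluginVersion() PluginVersion {
	return PluginVersion{Version: "v0.1.0"}
}

// Compile-time assertion that the interface is satisfied.
var _ PluginVersioner = (*versionedBackend)(nil)
```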

View File

@ -3,6 +3,7 @@ package logical
import ( import (
"context" "context"
"crypto" "crypto"
"crypto/cipher"
"io" "io"
) )
@ -35,6 +36,7 @@ type ManagedKey interface {
type ( type (
ManagedKeyConsumer func(context.Context, ManagedKey) error ManagedKeyConsumer func(context.Context, ManagedKey) error
ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error
ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error
) )
type ManagedKeySystemView interface { type ManagedKeySystemView interface {
@ -51,6 +53,12 @@ type ManagedKeySystemView interface {
// WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function,
// with the same semantics as WithManagedKeyByUUID // with the same semantics as WithManagedKeyByUUID
WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error
// WithManagedEncryptingKeyByName retrieves an instantiated managed encrypting key for consumption by the given function,
// with the same semantics as WithManagedKeyByName
WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error
// WithManagedEncryptingKeyByUUID retrieves an instantiated managed encrypting key for consumption by the given function,
// with the same semantics as WithManagedKeyByUUID
WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error
} }
type ManagedAsymmetricKey interface { type ManagedAsymmetricKey interface {
@ -82,3 +90,8 @@ type ManagedSigningKey interface {
// as needed so as to use per request contexts. // as needed so as to use per request contexts.
GetSigner(context.Context) (crypto.Signer, error) GetSigner(context.Context) (crypto.Signer, error)
} }
type ManagedEncryptingKey interface {
ManagedKey
GetAEAD(iv []byte) (cipher.AEAD, error)
}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.27.1 // protoc-gen-go v1.28.1
// protoc v3.19.4 // protoc v3.21.5
// source: sdk/logical/plugin.proto // source: sdk/logical/plugin.proto
package logical package logical

View File

@ -365,6 +365,7 @@ const (
ListOperation = "list" ListOperation = "list"
HelpOperation = "help" HelpOperation = "help"
AliasLookaheadOperation = "alias-lookahead" AliasLookaheadOperation = "alias-lookahead"
ResolveRoleOperation = "resolve-role"
// The operations below are called globally, the path is less relevant. // The operations below are called globally, the path is less relevant.
RevokeOperation Operation = "revoke" RevokeOperation Operation = "revoke"
@ -377,7 +378,6 @@ type MFACreds map[string][]string
// InitializationRequest stores the parameters and context of an Initialize() // InitializationRequest stores the parameters and context of an Initialize()
// call being made to a logical.Backend. // call being made to a logical.Backend.
type InitializationRequest struct { type InitializationRequest struct {
// Storage can be used to durably store and retrieve state. // Storage can be used to durably store and retrieve state.
Storage Storage Storage Storage
} }

View File

@ -310,3 +310,12 @@ func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) {
} }
var _ WrappingResponseWriter = &StatusHeaderResponseWriter{} var _ WrappingResponseWriter = &StatusHeaderResponseWriter{}
// ResolveRoleResponse returns a standard response to be returned by functions handling a ResolveRoleOperation
func ResolveRoleResponse(roleName string) (*Response, error) {
return &Response{
Data: map[string]interface{}{
"role": roleName,
},
}, nil
}

View File

@ -54,7 +54,15 @@ type SystemView interface {
// LookupPlugin looks into the plugin catalog for a plugin with the given // LookupPlugin looks into the plugin catalog for a plugin with the given
// name. Returns a PluginRunner or an error if a plugin can not be found. // name. Returns a PluginRunner or an error if a plugin can not be found.
LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error)
// LookupPluginVersion looks into the plugin catalog for a plugin with the given
// name and version. Returns a PluginRunner or an error if a plugin can not be found.
LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error)
// ListVersionedPlugins returns information about all plugins of a certain
// type in the catalog, including any versioning information stored for them.
ListVersionedPlugins(ctx context.Context, pluginType consts.PluginType) ([]pluginutil.VersionedPlugin, error)
// NewPluginClient returns a client for managing the lifecycle of plugin // NewPluginClient returns a client for managing the lifecycle of plugin
// processes // processes
@ -168,6 +176,14 @@ func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.Plu
return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
} }
func (d StaticSystemView) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) {
return nil, errors.New("LookupPluginVersion is not implemented in StaticSystemView")
}
func (d StaticSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) {
return nil, errors.New("ListVersionedPlugins is not implemented in StaticSystemView")
}
func (d StaticSystemView) MlockEnabled() bool { func (d StaticSystemView) MlockEnabled() bool {
return d.EnableMlock return d.EnableMlock
} }

View File

@ -0,0 +1,204 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.5
// source: sdk/logical/version.proto
package logical
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_logical_version_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_sdk_logical_version_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_sdk_logical_version_proto_rawDescGZIP(), []int{0}
}
// VersionReply is the reply for the Version method.
type VersionReply struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PluginVersion string `protobuf:"bytes,1,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"`
}
func (x *VersionReply) Reset() {
*x = VersionReply{}
if protoimpl.UnsafeEnabled {
mi := &file_sdk_logical_version_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *VersionReply) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*VersionReply) ProtoMessage() {}
func (x *VersionReply) ProtoReflect() protoreflect.Message {
mi := &file_sdk_logical_version_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead.
func (*VersionReply) Descriptor() ([]byte, []int) {
return file_sdk_logical_version_proto_rawDescGZIP(), []int{1}
}
func (x *VersionReply) GetPluginVersion() string {
if x != nil {
return x.PluginVersion
}
return ""
}
var File_sdk_logical_version_proto protoreflect.FileDescriptor
var file_sdk_logical_version_proto_rawDesc = []byte{
0x0a, 0x19, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x65,
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67,
0x69, 0x63, 0x61, 0x6c, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x35, 0x0a,
0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a,
0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65,
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
0x1a, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f,
0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61,
0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_sdk_logical_version_proto_rawDescOnce sync.Once
file_sdk_logical_version_proto_rawDescData = file_sdk_logical_version_proto_rawDesc
)
func file_sdk_logical_version_proto_rawDescGZIP() []byte {
file_sdk_logical_version_proto_rawDescOnce.Do(func() {
file_sdk_logical_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_version_proto_rawDescData)
})
return file_sdk_logical_version_proto_rawDescData
}
var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_sdk_logical_version_proto_goTypes = []interface{}{
(*Empty)(nil), // 0: logical.Empty
(*VersionReply)(nil), // 1: logical.VersionReply
}
var file_sdk_logical_version_proto_depIdxs = []int32{
0, // 0: logical.PluginVersion.Version:input_type -> logical.Empty
1, // 1: logical.PluginVersion.Version:output_type -> logical.VersionReply
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_sdk_logical_version_proto_init() }
func file_sdk_logical_version_proto_init() {
if File_sdk_logical_version_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*VersionReply); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_sdk_logical_version_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_sdk_logical_version_proto_goTypes,
DependencyIndexes: file_sdk_logical_version_proto_depIdxs,
MessageInfos: file_sdk_logical_version_proto_msgTypes,
}.Build()
File_sdk_logical_version_proto = out.File
file_sdk_logical_version_proto_rawDesc = nil
file_sdk_logical_version_proto_goTypes = nil
file_sdk_logical_version_proto_depIdxs = nil
}

View File

@ -0,0 +1,17 @@
syntax = "proto3";
package logical;
option go_package = "github.com/hashicorp/vault/sdk/logical";
message Empty {}
// VersionReply is the reply for the Version method.
message VersionReply {
string plugin_version = 1;
}
// PluginVersion is an optional RPC service implemented by plugins.
service PluginVersion {
// Version returns version information for the plugin.
rpc Version(Empty) returns (VersionReply);
}

View File

@ -0,0 +1,103 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package logical
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// PluginVersionClient is the client API for PluginVersion service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type PluginVersionClient interface {
// Version returns version information for the plugin.
Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error)
}
type pluginVersionClient struct {
cc grpc.ClientConnInterface
}
func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient {
return &pluginVersionClient{cc}
}
func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) {
out := new(VersionReply)
err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// PluginVersionServer is the server API for PluginVersion service.
// All implementations must embed UnimplementedPluginVersionServer
// for forward compatibility
type PluginVersionServer interface {
// Version returns version information for the plugin.
Version(context.Context, *Empty) (*VersionReply, error)
mustEmbedUnimplementedPluginVersionServer()
}
// UnimplementedPluginVersionServer must be embedded to have forward compatible implementations.
type UnimplementedPluginVersionServer struct {
}
func (UnimplementedPluginVersionServer) Version(context.Context, *Empty) (*VersionReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
}
func (UnimplementedPluginVersionServer) mustEmbedUnimplementedPluginVersionServer() {}
// UnsafePluginVersionServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to PluginVersionServer will
// result in compilation errors.
type UnsafePluginVersionServer interface {
mustEmbedUnimplementedPluginVersionServer()
}
func RegisterPluginVersionServer(s grpc.ServiceRegistrar, srv PluginVersionServer) {
s.RegisterService(&PluginVersion_ServiceDesc, srv)
}
func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PluginVersionServer).Version(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/logical.PluginVersion/Version",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PluginVersionServer).Version(ctx, req.(*Empty))
}
return interceptor(ctx, in, info, handler)
}
// PluginVersion_ServiceDesc is the grpc.ServiceDesc for PluginVersion service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var PluginVersion_ServiceDesc = grpc.ServiceDesc{
ServiceName: "logical.PluginVersion",
HandlerType: (*PluginVersionServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Version",
Handler: _PluginVersion_Version_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "sdk/logical/version.proto",
}

View File

@ -1,5 +1,10 @@
package physical package physical
import (
"encoding/hex"
"fmt"
)
// Entry is used to represent data stored by the physical backend // Entry is used to represent data stored by the physical backend
type Entry struct { type Entry struct {
Key string Key string
@ -9,3 +14,7 @@ type Entry struct {
// Only used in replication // Only used in replication
ValueHash []byte ValueHash []byte
} }
func (e *Entry) String() string {
return fmt.Sprintf("Key: %s. SealWrap: %t. Value: %s. ValueHash: %s", e.Key, e.SealWrap, hex.EncodeToString(e.Value), hex.EncodeToString(e.ValueHash))
}

View File

@ -10,10 +10,9 @@ import (
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/armon/go-radix"
log "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical"
radix "github.com/armon/go-radix"
) )
// Verify interfaces are satisfied // Verify interfaces are satisfied
@ -31,6 +30,7 @@ var (
GetDisabledError = errors.New("get operations disabled in inmem backend") GetDisabledError = errors.New("get operations disabled in inmem backend")
DeleteDisabledError = errors.New("delete operations disabled in inmem backend") DeleteDisabledError = errors.New("delete operations disabled in inmem backend")
ListDisabledError = errors.New("list operations disabled in inmem backend") ListDisabledError = errors.New("list operations disabled in inmem backend")
GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in inmem backend")
) )
// InmemBackend is an in-memory only physical backend. It is useful // InmemBackend is an in-memory only physical backend. It is useful
@ -45,6 +45,7 @@ type InmemBackend struct {
failPut *uint32 failPut *uint32
failDelete *uint32 failDelete *uint32
failList *uint32 failList *uint32
failGetInTxn *uint32
logOps bool logOps bool
maxValueSize int maxValueSize int
} }
@ -73,6 +74,7 @@ func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, erro
failPut: new(uint32), failPut: new(uint32),
failDelete: new(uint32), failDelete: new(uint32),
failList: new(uint32), failList: new(uint32),
failGetInTxn: new(uint32),
logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
maxValueSize: maxValueSize, maxValueSize: maxValueSize,
}, nil }, nil
@ -100,6 +102,7 @@ func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.
failPut: new(uint32), failPut: new(uint32),
failDelete: new(uint32), failDelete: new(uint32),
failList: new(uint32), failList: new(uint32),
failGetInTxn: new(uint32),
logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
maxValueSize: maxValueSize, maxValueSize: maxValueSize,
}, },
@ -189,6 +192,14 @@ func (i *InmemBackend) FailGet(fail bool) {
atomic.StoreUint32(i.failGet, val) atomic.StoreUint32(i.failGet, val)
} }
func (i *InmemBackend) FailGetInTxn(fail bool) {
var val uint32
if fail {
val = 1
}
atomic.StoreUint32(i.failGetInTxn, val)
}
// Delete is used to permanently delete an entry // Delete is used to permanently delete an entry
func (i *InmemBackend) Delete(ctx context.Context, key string) error { func (i *InmemBackend) Delete(ctx context.Context, key string) error {
i.permitPool.Acquire() i.permitPool.Acquire()
@ -280,7 +291,7 @@ func (i *InmemBackend) FailList(fail bool) {
atomic.StoreUint32(i.failList, val) atomic.StoreUint32(i.failList, val)
} }
// Implements the transaction interface // Transaction implements the transaction interface
func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
t.permitPool.Acquire() t.permitPool.Acquire()
defer t.permitPool.Release() defer t.permitPool.Release()
@ -288,5 +299,12 @@ func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*phy
t.Lock() t.Lock()
defer t.Unlock() defer t.Unlock()
failGetInTxn := atomic.LoadUint32(t.failGetInTxn)
for _, t := range txns {
if t.Operation == physical.GetOperation && failGetInTxn != 0 {
return GetInTxnDisabledError
}
}
return physical.GenericTransactionHandler(ctx, t, txns) return physical.GenericTransactionHandler(ctx, t, txns)
} }

View File

@ -2,8 +2,9 @@ package physical
import ( import (
"context" "context"
"fmt"
multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
) )
// TxnEntry is an operation that takes atomically as part of // TxnEntry is an operation that takes atomically as part of
@ -13,6 +14,10 @@ type TxnEntry struct {
Entry *Entry Entry *Entry
} }
func (t *TxnEntry) String() string {
return fmt.Sprintf("Operation: %s. Entry: %s", t.Operation, t.Entry)
}
// Transactional is an optional interface for backends that // Transactional is an optional interface for backends that
// support doing transactional updates of multiple keys. This is // support doing transactional updates of multiple keys. This is
// required for some features such as replication. // required for some features such as replication.
@ -40,6 +45,19 @@ func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns
rollbackStack := make([]*TxnEntry, 0, len(txns)) rollbackStack := make([]*TxnEntry, 0, len(txns))
var dirty bool var dirty bool
// Update all of our GET transaction entries, so we can populate existing values back at the wal layer.
for _, txn := range txns {
if txn.Operation == GetOperation {
entry, err := t.GetInternal(ctx, txn.Entry.Key)
if err != nil {
return err
}
if entry != nil {
txn.Entry.Value = entry.Value
}
}
}
// We walk the transactions in order; each successful operation goes into a // We walk the transactions in order; each successful operation goes into a
// LIFO for rollback if we hit an error along the way // LIFO for rollback if we hit an error along the way
TxnWalk: TxnWalk:
@ -78,6 +96,7 @@ TxnWalk:
dirty = true dirty = true
break TxnWalk break TxnWalk
} }
// Nothing existed so in fact rolling back requires a delete // Nothing existed so in fact rolling back requires a delete
var rollbackEntry *TxnEntry var rollbackEntry *TxnEntry
if entry == nil { if entry == nil {

View File

@ -1,3 +1,98 @@
## 2.4.0
### Features
- DeferCleanup supports functions with multiple-return values [5e33c75]
- Add GinkgoLogr (#1067) [bf78c28]
- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
### Fixes
- correcting some typos (#1064) [1403d3c]
- fix flaky internal_integration interrupt specs [2105ba3]
- Correct busted link in README [be6b5b9]
### Maintenance
- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
- Add GHA to dependabot config [4442772]
## 2.3.1
### Fixes
Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
### Maintenance
- bump gomega to v1.22.0 [822a937]
## 2.3.0
### Interruptible Nodes and Timeouts
Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
```go
It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
// do things until `ctx.Done()` is closed, for example:
req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
Expect(err).NotTo(HaveOccurred())
_, err = http.DefaultClient.Do(req)
Expect(err).NotTo(HaveOccurred())
Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
```
and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
### Features
- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
### Fixes
- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
- Make the outline command able to use the DSL import [1be2427]
### Maintenance
- chore(docs): delete no meaning d [57c373c]
- chore(docs): Fix hyperlinks [30526d5]
- chore(docs): fix code blocks without language settings [cf611c4]
- fix intra-doc link [b541bcb]
## 2.2.0
### Generate real-time Progress Reports [f91377c]
Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
Progress Reports are emitted to stdout and are also stored in the machine-readable report formats that Ginkgo supports.
Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
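As a rough illustration (the decorators are the ones named above; the spec body and helper are made up), the per-node settings attach like any other Ginkgo decorator:

```go
It("rebuilds the search index", func() {
	// Long-running work; Ginkgo starts emitting Progress Reports if this node
	// runs longer than 30s, and then every 10s after that.
	rebuildIndex() // hypothetical helper
}, PollProgressAfter(30*time.Second), PollProgressInterval(10*time.Second))
```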
### Features
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
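A hedged sketch of what this looks like in practice (the setup helper is made up): a suite node now takes decorators after its callback.

```go
var _ = BeforeSuite(func() {
	// Expensive one-time setup; ask Ginkgo for Progress Reports if it stalls.
	startSharedFixtures() // hypothetical helper
}, PollProgressAfter(time.Minute))
```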
### Maintenance
- Modernize the invocation of Ginkgo in github actions [0ffde58]
- Update recommended CI settings in docs [896bbb9]
- Speed up unnecessarily slow integration test [6d3a90e]
## 2.1.6 ## 2.1.6
### Fixes ### Fixes
@ -77,7 +172,7 @@ See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkg
Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC. Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess 1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc` You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
## 1.16.4 ## 1.16.4
@ -184,7 +279,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4] - replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
- improve ginkgo performance - makes progress on #644 [a14f98e] - improve ginkgo performance - makes progress on #644 [a14f98e]
- fix convert integration tests [1f8ba69] - fix convert integration tests [1f8ba69]
- fix typo succesful -> successful (#663) [1ea49cf] - fix typo successful -> successful (#663) [1ea49cf]
- Fix invalid link (#658) [b886136] - Fix invalid link (#658) [b886136]
- convert utility : Include comments from source (#657) [1077c6d] - convert utility : Include comments from source (#657) [1077c6d]
- Explain what BDD means [d79e7fb] - Explain what BDD means [d79e7fb]
@ -278,7 +373,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6] - Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317] - all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c] - Increase eventually timeout to 30s [c73579c]
- Clarify asynchronous test behaviour [294d8f4] - Clarify asynchronous test behavior [294d8f4]
- Travis badge should only show master [26d2143] - Travis badge should only show master [26d2143]
## 1.5.0 5/10/2018 ## 1.5.0 5/10/2018
@ -296,13 +391,13 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0] - When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd] - `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98] - Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c] - Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b] - Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d] - Add an extra new line after reporting spec run completion for test2json [874520d]
- added name field to junit reported testsuite [ae61c63] - added name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856] - Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe] - Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d] - Synchronize the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed] - Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8] - Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598] - Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]

View File

@ -8,6 +8,6 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Make sure all the tests succeed via `ginkgo -r -p` - Make sure all the tests succeed via `ginkgo -r -p`
- Vet your changes via `go vet ./...` - Vet your changes via `go vet ./...`
- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. - Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
Thanks for supporting Ginkgo! Thanks for supporting Ginkgo!

View File

@ -90,7 +90,7 @@ If you have a question, comment, bug report, feature request, etc. please open a
## Capabilities ## Capabilities
Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
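As a rough sketch of how these nodes typically compose, the spec below wires a `Describe` container, a `BeforeEach` setup node, a `When` sub-container, and an `It` subject node together. The `Library` type and its methods are hypothetical, defined inline only so the example is self-contained:

```go
package library_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Library is a hypothetical system under test.
type Library struct{ stock map[string]int }

func NewLibrary() *Library {
	return &Library{stock: map[string]int{"Les Miserables": 1}}
}

func (l *Library) CheckOut(title string) error {
	if l.stock[title] == 0 {
		return fmt.Errorf("%q is not in stock", title)
	}
	l.stock[title]--
	return nil
}

// Standard suite bootstrap: hooks Gomega failures into Ginkgo and runs the specs.
func TestLibrary(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Library Suite")
}

var _ = Describe("Checking out a book", func() {
	var library *Library

	// BeforeEach gives every spec in this container a fresh Library.
	BeforeEach(func() {
		library = NewLibrary()
	})

	When("the book is in stock", func() {
		It("lends the book to the reader", func() {
			Expect(library.CheckOut("Les Miserables")).To(Succeed())
		})
	})
})
```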

View File

@ -1,7 +1,13 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date. 1. Ensure CHANGELOG.md is up to date.
- Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release - Use
```bash
LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
```
to update the changelog
- Categorize the changes into - Categorize the changes into
- Breaking Changes (requires a major version) - Breaking Changes (requires a major version)
- New Features (minor version) - New Features (minor version)

Some files were not shown because too many files have changed in this diff