mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-01-29 16:19:29 +00:00
Merge pull request #112 from ceph/devel
Sync devel branch with upstream
This commit is contained in:
commit
52fc3942b1
24
.mergify.yml
24
.mergify.yml
@ -26,15 +26,15 @@ queue_rules:
|
||||
- "status-success=golangci-lint"
|
||||
- "status-success=mod-check"
|
||||
- "status-success=lint-extras"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.24"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.21"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.24"
|
||||
- "status-success=ci/centos/upgrade-tests-cephfs"
|
||||
- "status-success=ci/centos/upgrade-tests-rbd"
|
||||
- and:
|
||||
@ -75,15 +75,15 @@ pull_request_rules:
|
||||
- "status-success=golangci-lint"
|
||||
- "status-success=mod-check"
|
||||
- "status-success=lint-extras"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.24"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.21"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.24"
|
||||
- "status-success=ci/centos/upgrade-tests-cephfs"
|
||||
- "status-success=ci/centos/upgrade-tests-rbd"
|
||||
- "status-success=DCO"
|
||||
@ -114,15 +114,15 @@ pull_request_rules:
|
||||
- "status-success=commitlint"
|
||||
- "status-success=mod-check"
|
||||
- "status-success=lint-extras"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.24"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.21"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.24"
|
||||
- "status-success=ci/centos/upgrade-tests-cephfs"
|
||||
- "status-success=ci/centos/upgrade-tests-rbd"
|
||||
- "status-success=DCO"
|
||||
@ -145,15 +145,15 @@ pull_request_rules:
|
||||
- "status-success=mod-check"
|
||||
- "status-success=lint-extras"
|
||||
- "#changes-requested-reviews-by=0"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
|
||||
- "status-success=ci/centos/k8s-e2e-external-storage/1.24"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.21"
|
||||
- "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.22"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.23"
|
||||
- "status-success=ci/centos/mini-e2e/k8s-1.24"
|
||||
- "status-success=ci/centos/upgrade-tests-cephfs"
|
||||
- "status-success=ci/centos/upgrade-tests-rbd"
|
||||
- "status-success=DCO"
|
||||
|
@ -50,8 +50,8 @@ ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v17
|
||||
# CSI sidecar version
|
||||
CSI_ATTACHER_VERSION=v3.5.0
|
||||
CSI_SNAPSHOTTER_VERSION=v6.0.1
|
||||
CSI_PROVISIONER_VERSION=v3.1.0
|
||||
CSI_RESIZER_VERSION=v1.5.0
|
||||
CSI_PROVISIONER_VERSION=v3.2.1
|
||||
CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.5.1
|
||||
|
||||
# e2e settings
|
||||
|
@ -99,6 +99,8 @@ charts and their default values.
|
||||
| `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` |
|
||||
| `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `{}` |
|
||||
| `nodeplugin.forcecephkernelclient` | Set to true to enable Ceph Kernel clients on kernel < 4.17 which support quotas | `true` |
|
||||
| `nodeplugin.kernelmountoptions` | Comma separated string of mount options accepted by cephfs kernel mounter quotas | `""` |
|
||||
| `nodeplugin.fusemountoptions` | Comma separated string of mount options accepted by ceph-fuse mounter quotas | `""` |
|
||||
| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). | `false` |
|
||||
| `provisioner.name` | Specifies the name of provisioner | `provisioner` |
|
||||
| `provisioner.replicaCount` | Specifies the replicaCount | `3` |
|
||||
@ -107,7 +109,7 @@ charts and their default values.
|
||||
| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` |
|
||||
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
||||
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
|
||||
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.1.0` |
|
||||
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.2.1` |
|
||||
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
|
||||
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.5.0` |
|
||||
|
@ -9,3 +9,4 @@ metadata:
|
||||
spec:
|
||||
attachRequired: false
|
||||
podInfoOnMount: false
|
||||
fsGroupPolicy: File
|
||||
|
@ -72,6 +72,8 @@ spec:
|
||||
{{- if .Values.nodeplugin.forcecephkernelclient }}
|
||||
- "--forcecephkernelclient={{ .Values.nodeplugin.forcecephkernelclient }}"
|
||||
{{- end }}
|
||||
- "--kernelmountoptions={{ .Values.nodeplugin.kernelmountoptions }}"
|
||||
- "--fusemountoptions={{ .Values.nodeplugin.fusemountoptions }}"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
- "--v={{ .Values.logLevel }}"
|
||||
- "--drivername=$(DRIVER_NAME)"
|
||||
|
@ -15,7 +15,7 @@ rules:
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete","patch"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "delete","patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
@ -63,6 +63,8 @@ spec:
|
||||
- "--leader-election=true"
|
||||
- "--retry-interval-start=500ms"
|
||||
- "--extra-create-metadata=true"
|
||||
- "--feature-gates=HonorPVReclaimPolicy=true"
|
||||
- "--prevent-volume-mode-conversion=true"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
|
||||
|
@ -102,6 +102,11 @@ nodeplugin:
|
||||
# on kernel < 4.17 which support quotas
|
||||
# forcecephkernelclient: true
|
||||
|
||||
# common mount options to apply all mounting
|
||||
# example: kernelmountoptions: "recover_session=clean"
|
||||
kernelmountoptions: ""
|
||||
fusemountoptions: ""
|
||||
|
||||
# If true, create & use Pod Security Policy resources
|
||||
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
|
||||
podSecurityPolicy:
|
||||
@ -166,7 +171,7 @@ provisioner:
|
||||
provisioner:
|
||||
image:
|
||||
repository: registry.k8s.io/sig-storage/csi-provisioner
|
||||
tag: v3.1.0
|
||||
tag: v3.2.1
|
||||
pullPolicy: IfNotPresent
|
||||
resources: {}
|
||||
|
||||
|
@ -117,7 +117,7 @@ charts and their default values.
|
||||
| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` |
|
||||
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
||||
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
|
||||
| `provisioner.provisioner.image.tag` | Specifies image tag | `canary` |
|
||||
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.2.1` |
|
||||
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
|
||||
| `provisioner.attacher.image.tag` | Specifies image tag | `v3.5.0` |
|
||||
|
@ -64,6 +64,8 @@ spec:
|
||||
- "--retry-interval-start=500ms"
|
||||
- "--default-fstype={{ .Values.provisioner.defaultFSType }}"
|
||||
- "--extra-create-metadata=true"
|
||||
- "--feature-gates=HonorPVReclaimPolicy=true"
|
||||
- "--prevent-volume-mode-conversion=true"
|
||||
{{- if .Values.topology.enabled }}
|
||||
- "--feature-gates=Topology=true"
|
||||
{{- end }}
|
||||
|
@ -205,12 +205,8 @@ provisioner:
|
||||
|
||||
provisioner:
|
||||
image:
|
||||
# TODO: replace with released image version.
|
||||
# canary image is being to be used to test pvc-pvc clone
|
||||
# with differe sc feature.
|
||||
# see: https://github.com/kubernetes-csi/external-provisioner/pull/699
|
||||
repository: gcr.io/k8s-staging-sig-storage/csi-provisioner
|
||||
tag: canary
|
||||
tag: v3.2.1
|
||||
pullPolicy: IfNotPresent
|
||||
resources: {}
|
||||
|
||||
|
@ -88,6 +88,16 @@ func init() {
|
||||
"forcecephkernelclient",
|
||||
false,
|
||||
"enable Ceph Kernel clients on kernel < 4.17 which support quotas")
|
||||
flag.StringVar(
|
||||
&conf.KernelMountOptions,
|
||||
"kernelmountoptions",
|
||||
"",
|
||||
"Comma separated string of mount options accepted by cephfs kernel mounter")
|
||||
flag.StringVar(
|
||||
&conf.FuseMountOptions,
|
||||
"fusemountoptions",
|
||||
"",
|
||||
"Comma separated string of mount options accepted by ceph-fuse mounter")
|
||||
|
||||
// liveness/grpc metrics related flags
|
||||
flag.IntVar(&conf.MetricsPort, "metricsport", 8080, "TCP port for liveness/grpc metrics requests")
|
||||
|
@ -23,10 +23,10 @@ RUN source /build.env && \
|
||||
# test if the downloaded version of Golang works (different arch?)
|
||||
RUN ${GOROOT}/bin/go version && ${GOROOT}/bin/go env
|
||||
|
||||
# FIXME: Ceph does not need Apache Arrow anymore, some container images may
|
||||
# still have the repository enabled. Disabling the repository can be removed in
|
||||
# the future, see https://github.com/ceph/ceph-container/pull/1990 .
|
||||
RUN dnf config-manager --disable apache-arrow-centos || true
|
||||
# TODO: remove the following cmd, when issue
|
||||
# https://github.com/ceph/ceph-container/issues/2034 is fixed.
|
||||
RUN dnf config-manager --disable \
|
||||
tcmu-runner,tcmu-runner-source,tcmu-runner-noarch || true
|
||||
|
||||
RUN dnf -y install \
|
||||
librados-devel librbd-devel \
|
||||
|
@ -43,7 +43,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
|
||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.2.1
|
||||
args:
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--v=5"
|
||||
@ -51,6 +51,8 @@ spec:
|
||||
- "--leader-election=true"
|
||||
- "--retry-interval-start=500ms"
|
||||
- "--feature-gates=Topology=false"
|
||||
- "--feature-gates=HonorPVReclaimPolicy=true"
|
||||
- "--prevent-volume-mode-conversion=true"
|
||||
- "--extra-create-metadata=true"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
|
@ -18,7 +18,7 @@ rules:
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete", "patch"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
@ -8,3 +8,4 @@ metadata:
|
||||
spec:
|
||||
attachRequired: false
|
||||
podInfoOnMount: false
|
||||
fsGroupPolicy: File
|
||||
|
@ -40,13 +40,17 @@ spec:
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
|
||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.2.1
|
||||
args:
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--v=5"
|
||||
- "--timeout=150s"
|
||||
- "--leader-election=true"
|
||||
- "--retry-interval-start=500ms"
|
||||
# TODO: enable the feature-gate once error has been handled in nfs.
|
||||
# Issue #3230
|
||||
# - "--feature-gates=HonorPVReclaimPolicy=true"
|
||||
- "--prevent-volume-mode-conversion=true"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: unix:///csi/csi-provisioner.sock
|
||||
|
@ -47,11 +47,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
# TODO: replace with released image version.
|
||||
# Canary image is being to be used to test pvc-pvc clone
|
||||
# with differe sc feature.
|
||||
# see: https://github.com/kubernetes-csi/external-provisioner/pull/699
|
||||
image: gcr.io/k8s-staging-sig-storage/csi-provisioner:canary
|
||||
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.2.1
|
||||
args:
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--v=5"
|
||||
@ -60,6 +56,8 @@ spec:
|
||||
- "--leader-election=true"
|
||||
# set it to true to use topology based provisioning
|
||||
- "--feature-gates=Topology=false"
|
||||
- "--feature-gates=HonorPVReclaimPolicy=true"
|
||||
- "--prevent-volume-mode-conversion=true"
|
||||
# if fstype is not specified in storageclass, ext4 is default
|
||||
- "--default-fstype=ext4"
|
||||
- "--extra-create-metadata=true"
|
||||
|
@ -59,6 +59,8 @@ that should be resolved in v14.2.3.
|
||||
| `--timeout` | `3s` | Probe timeout in seconds |
|
||||
| `--histogramoption` | `0.5,2,6` | [Deprecated] Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
|
||||
| `--forcecephkernelclient` | `false` | Force enabling Ceph Kernel clients for mounting on kernels < 4.17 |
|
||||
| `--kernelmountoptions` | _empty_ | Comma separated string of mount options accepted by cephfs kernel mounter |
|
||||
| `--fusemountoptions` | _empty_ | Comma separated string of mount options accepted by ceph-fuse mounter |
|
||||
| `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
|
||||
|
||||
**NOTE:** The parameter `-forcecephkernelclient` enables the Kernel
|
||||
|
@ -328,51 +328,49 @@ var _ = Describe(cephfsType, func() {
|
||||
})
|
||||
|
||||
By("verify RWOP volume support", func() {
|
||||
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
|
||||
err := createCephfsStorageClass(f.ClientSet, f, true, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CephFS storageclass: %v", err)
|
||||
}
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
err := createCephfsStorageClass(f.ClientSet, f, true, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CephFS storageclass: %v", err)
|
||||
}
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
// create application
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
// create application
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
return
|
||||
}
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
|
||||
validateOmapCount(f, 1, cephfsType, metadataPool, volumesType)
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
|
||||
validateOmapCount(f, 1, cephfsType, metadataPool, volumesType)
|
||||
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
|
||||
validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
|
||||
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
|
||||
validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
|
||||
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
|
74
e2e/nfs.go
74
e2e/nfs.go
@ -371,49 +371,47 @@ var _ = Describe("nfs", func() {
|
||||
})
|
||||
|
||||
By("verify RWOP volume support", func() {
|
||||
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
|
||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CephFS storageclass: %v", err)
|
||||
}
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CephFS storageclass: %v", err)
|
||||
}
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
// create application
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
// create application
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
return
|
||||
}
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 1, fileSystemName, defaultSubvolumegroup)
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 1, fileSystemName, defaultSubvolumegroup)
|
||||
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 0, fileSystemName, defaultSubvolumegroup)
|
||||
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 0, fileSystemName, defaultSubvolumegroup)
|
||||
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
|
144
e2e/rbd.go
144
e2e/rbd.go
@ -876,85 +876,81 @@ var _ = Describe("RBD", func() {
|
||||
})
|
||||
|
||||
By("create a Block mode RWOP PVC and bind it to more than one app", func() {
|
||||
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
|
||||
pvc, err := loadPVC(rawPVCRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(rawAppRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||
pvc, err := loadPVC(rawPVCRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(rawAppRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||
})
|
||||
|
||||
By("create a RWOP PVC and bind it to more than one app", func() {
|
||||
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
}
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
// validate created backend rbd images
|
||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||
})
|
||||
|
||||
By("create an erasure coded PVC and bind it to an app", func() {
|
||||
|
@ -578,7 +578,7 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
|
||||
if pvc.Spec.VolumeMode != nil {
|
||||
isBlockMode = (*pvc.Spec.VolumeMode == v1.PersistentVolumeBlock)
|
||||
}
|
||||
if (!isBlockMode || k8sVersionGreaterEquals(f.ClientSet, 1, 22)) && !isOpenShift {
|
||||
if !isBlockMode && !isOpenShift {
|
||||
err = getMetricsForPVC(f, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1479,15 +1479,13 @@ func validateController(
|
||||
return deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||
}
|
||||
|
||||
// nolint:deadcode,unused // Unused code will be used in future.
|
||||
// k8sVersionGreaterEquals checks the ServerVersion of the Kubernetes cluster
|
||||
// and compares it to the major.minor version passed. In case the version of
|
||||
// the cluster is equal or higher to major.minor, `true` is returned, `false`
|
||||
// otherwise.
|
||||
//
|
||||
// If fetching the ServerVersion of the Kubernetes cluster fails, the calling
|
||||
// test case is marked as `FAILED` and gets aborted.
|
||||
//
|
||||
// nolint:unparam // currently major is always 1, this can change in the future
|
||||
func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool {
|
||||
v, err := c.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
|
28
go.mod
28
go.mod
@ -3,9 +3,9 @@ module github.com/ceph/ceph-csi
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/IBM/keyprotect-go-client v0.7.0
|
||||
github.com/IBM/keyprotect-go-client v0.8.0
|
||||
github.com/aws/aws-sdk-go v1.44.28
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.9
|
||||
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
|
||||
// TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
|
||||
github.com/ceph/go-ceph v0.16.0
|
||||
@ -15,7 +15,7 @@ require (
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/hashicorp/vault/api v1.6.0
|
||||
github.com/hashicorp/vault/api v1.7.2
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.11.0
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.1
|
||||
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
|
||||
@ -23,7 +23,7 @@ require (
|
||||
github.com/onsi/gomega v1.19.0
|
||||
github.com/pborman/uuid v1.2.1
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/stretchr/testify v1.7.2
|
||||
github.com/stretchr/testify v1.8.0
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158
|
||||
google.golang.org/grpc v1.47.0
|
||||
@ -32,7 +32,7 @@ require (
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v12.0.0+incompatible
|
||||
k8s.io/cloud-provider v0.24.2
|
||||
k8s.io/klog/v2 v2.60.1
|
||||
k8s.io/klog/v2 v2.70.1
|
||||
//
|
||||
// when updating k8s.io/kubernetes, make sure to update the replace section too
|
||||
//
|
||||
@ -47,11 +47,11 @@ require (
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/armon/go-metrics v0.3.9 // indirect
|
||||
github.com/armon/go-radix v1.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect
|
||||
github.com/aws/smithy-go v1.11.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 // indirect
|
||||
github.com/aws/smithy-go v1.12.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
|
||||
@ -75,7 +75,7 @@ require (
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
@ -83,10 +83,10 @@ require (
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-plugin v1.4.3 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2 // indirect
|
||||
@ -94,7 +94,7 @@ require (
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/vault v1.4.2 // indirect
|
||||
github.com/hashicorp/vault/sdk v0.5.0 // indirect
|
||||
github.com/hashicorp/vault/sdk v0.5.1 // indirect
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
|
59
go.sum
59
go.sum
@ -79,8 +79,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
|
||||
github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
|
||||
github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI=
|
||||
github.com/IBM/keyprotect-go-client v0.7.0 h1:JstSHD14Lp6ihwQseyPuGcs1AjOBjAmcisP0dTBA6A0=
|
||||
github.com/IBM/keyprotect-go-client v0.7.0/go.mod h1:SVr2ylV/fhSQPDiUjWirN9fsyWFCNNbt8GIT8hPJVjE=
|
||||
github.com/IBM/keyprotect-go-client v0.8.0 h1:IgLKSigHRpCCl5cZjBkOYziUZ9zxn6w9iRh+KA8Siww=
|
||||
github.com/IBM/keyprotect-go-client v0.8.0/go.mod h1:yr8h2noNgU8vcbs+vhqoXp3Lmv73PI0zAc6VMgFvWwM=
|
||||
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
|
||||
github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
|
||||
github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
|
||||
@ -143,18 +143,18 @@ github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9
|
||||
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.28 h1:h/OAqEqY18wq//v6h4GNPMmCkxuzSDrWuGyrvSiRqf4=
|
||||
github.com/aws/aws-sdk-go v1.44.28/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.5 h1:Ah9h1TZD9E2S1LzHpViBO3Jz9FPL5+rmflmb8hXirtI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 h1:Zt7DDk5V7SyQULUUwIKzsROtVzp/kVvcz15uQx/Tkow=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 h1:eeXdGVtXEe+2Jc49+/vAzna3FAQnUD4AagAw8tzbmfc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 h1:0ZxYAZ1cn7Swi/US55VKciCE6RhRHIwCKIWaMLdT6pg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 h1:HLzjwQM9975FQWSF3uENDGHT1gFQm/q3QXu2BYIcI08=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
|
||||
github.com/aws/smithy-go v1.11.3 h1:DQixirEFM9IaKxX1olZ3ke3nvxRS2xMDteKIDWxozW8=
|
||||
github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 h1:2C0pYHcUBmdzPj+EKNC4qj97oK6yjrUhc1KoSodglvk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 h1:2J+jdlBJWEmTyAwC82Ym68xCykIvnSnIN18b8xHGlcc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 h1:oKnAXxSF2FUvfgw8uzU/v9OTYorJJZ8eBmWhr9TWVVQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 h1:yOfILxyjmtr2ubRkRJldlHDFBhf5vw4CzhbwWIBmimQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y=
|
||||
github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0=
|
||||
github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
@ -494,8 +494,9 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
@ -582,8 +583,9 @@ github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
@ -593,8 +595,8 @@ github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PU
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
|
||||
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5 h1:MBgwAFPUbfuI0+tmDU/aeM1MARvdbqWmiieXIalKqDE=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
|
||||
github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
|
||||
@ -664,8 +666,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
|
||||
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
|
||||
github.com/hashicorp/vault/api v1.6.0 h1:B8UUYod1y1OoiGHq9GtpiqSnGOUEWHaA26AY8RQEDY4=
|
||||
github.com/hashicorp/vault/api v1.6.0/go.mod h1:h1K70EO2DgnBaTz5IsL6D5ERsNt5Pce93ueVS2+t0Xc=
|
||||
github.com/hashicorp/vault/api v1.7.2 h1:kawHE7s/4xwrdKbkmwQi0wYaIeUhk5ueek7ljuezCVQ=
|
||||
github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M=
|
||||
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
|
||||
@ -675,8 +677,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
|
||||
github.com/hashicorp/vault/sdk v0.5.0 h1:EED7p0OCU3OY5SAqJwSANofY1YKMytm+jDHDQ2EzGVQ=
|
||||
github.com/hashicorp/vault/sdk v0.5.0/go.mod h1:UJZHlfwj7qUJG8g22CuxUgkdJouFrBNvBHCyx8XAPdo=
|
||||
github.com/hashicorp/vault/sdk v0.5.1 h1:zly/TmNgOXCGgWIRA8GojyXzG817POtVh3uzIwzZx+8=
|
||||
github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
@ -1074,16 +1076,19 @@ github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwb
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
@ -1692,8 +1697,9 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/h2non/gock.v1 v1.0.15 h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0=
|
||||
gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
|
||||
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
|
||||
gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
@ -1775,8 +1781,9 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
|
||||
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
|
||||
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-aggregator v0.24.2/go.mod h1:Ju2jNDixn+vqeeKEBfjfpc204bO1pbdXX0N9knCxeMQ=
|
||||
k8s.io/kube-controller-manager v0.24.2/go.mod h1:KDE0yqiEvxYiO0WRpPA4rVx8AcK1vsWydUF37AJ9lTI=
|
||||
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
|
@ -205,23 +205,7 @@ func checkValidCreateVolumeRequest(
|
||||
return errors.New("cloning snapshot-backed volumes is currently not supported")
|
||||
}
|
||||
case sID != nil:
|
||||
if vol.Size < parentVol.Size {
|
||||
return fmt.Errorf(
|
||||
"cannot restore from snapshot %s: volume size %d is smaller than source volume size %d",
|
||||
sID.SnapshotID,
|
||||
parentVol.Size,
|
||||
vol.Size)
|
||||
}
|
||||
|
||||
if vol.BackingSnapshot {
|
||||
if vol.Size != parentVol.Size {
|
||||
return fmt.Errorf(
|
||||
"cannot create snapshot-backed volume of different size: expected %d bytes, got %d bytes",
|
||||
parentVol.Size,
|
||||
vol.Size,
|
||||
)
|
||||
}
|
||||
|
||||
volCaps := req.GetVolumeCapabilities()
|
||||
for _, volCap := range volCaps {
|
||||
mode := volCap.AccessMode.Mode
|
||||
@ -277,7 +261,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
defer volOptions.Destroy()
|
||||
|
||||
if req.GetCapacityRange() != nil {
|
||||
volOptions.Size = util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
|
||||
volOptions.Size = util.RoundOffCephFSVolSize(req.GetCapacityRange().GetRequiredBytes())
|
||||
}
|
||||
|
||||
parentVol, pvID, sID, err := checkContentSource(ctx, req, cr)
|
||||
@ -672,7 +656,8 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
return nil, status.Error(codes.InvalidArgument, "cannot expand snapshot-backed volume")
|
||||
}
|
||||
|
||||
RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
|
||||
RoundOffSize := util.RoundOffCephFSVolSize(req.GetCapacityRange().GetRequiredBytes())
|
||||
|
||||
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
|
||||
if err = volClient.ResizeVolume(ctx, RoundOffSize); err != nil {
|
||||
log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err)
|
||||
|
@ -39,12 +39,6 @@ import (
|
||||
// taken through this additional cluster information.
|
||||
var clusterAdditionalInfo = make(map[string]*localClusterState)
|
||||
|
||||
const (
|
||||
// modeAllRWX can be used for setting permissions to Read-Write-eXecute
|
||||
// for User, Group and Other.
|
||||
modeAllRWX = 0o777
|
||||
)
|
||||
|
||||
// Subvolume holds subvolume information. This includes only the needed members
|
||||
// from fsAdmin.SubVolumeInfo.
|
||||
type Subvolume struct {
|
||||
@ -231,7 +225,6 @@ func (s *subVolumeClient) CreateVolume(ctx context.Context) error {
|
||||
|
||||
opts := fsAdmin.SubVolumeOptions{
|
||||
Size: fsAdmin.ByteCount(s.Size),
|
||||
Mode: modeAllRWX,
|
||||
}
|
||||
if s.Pool != "" {
|
||||
opts.PoolLayout = s.Pool
|
||||
|
@ -64,10 +64,18 @@ func NewControllerServer(d *csicommon.CSIDriver) *ControllerServer {
|
||||
}
|
||||
|
||||
// NewNodeServer initialize a node server for ceph CSI driver.
|
||||
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) *NodeServer {
|
||||
func NewNodeServer(
|
||||
d *csicommon.CSIDriver,
|
||||
t string,
|
||||
topology map[string]string,
|
||||
kernelMountOptions string,
|
||||
fuseMountOptions string,
|
||||
) *NodeServer {
|
||||
return &NodeServer{
|
||||
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
|
||||
VolumeLocks: util.NewVolumeLocks(),
|
||||
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
|
||||
VolumeLocks: util.NewVolumeLocks(),
|
||||
kernelMountOptions: kernelMountOptions,
|
||||
fuseMountOptions: fuseMountOptions,
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,7 +130,7 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
if err != nil {
|
||||
log.FatalLogMsg(err.Error())
|
||||
}
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology, conf.KernelMountOptions, conf.FuseMountOptions)
|
||||
}
|
||||
|
||||
if conf.IsControllerServer {
|
||||
@ -133,7 +141,7 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
if err != nil {
|
||||
log.FatalLogMsg(err.Error())
|
||||
}
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology, conf.KernelMountOptions, conf.FuseMountOptions)
|
||||
fs.cs = NewControllerServer(fs.cd)
|
||||
}
|
||||
|
||||
|
@ -51,8 +51,8 @@ func (ms mountState) String() string {
|
||||
}[int(ms)]
|
||||
}
|
||||
|
||||
func getMountState(path string) (mountState, error) {
|
||||
isMnt, err := util.IsMountPoint(path)
|
||||
func (ns *NodeServer) getMountState(path string) (mountState, error) {
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, path)
|
||||
if err != nil {
|
||||
if util.IsCorruptedMountError(err) {
|
||||
return msCorrupted, nil
|
||||
@ -117,12 +117,12 @@ func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
|
||||
) error {
|
||||
// Check if there is anything to restore.
|
||||
|
||||
stagingTargetMs, err := getMountState(stagingTargetPath)
|
||||
stagingTargetMs, err := ns.getMountState(stagingTargetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
targetMs, err := getMountState(targetPath)
|
||||
targetMs, err := ns.getMountState(targetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -230,7 +230,7 @@ func (ns *NodeServer) tryRestoreFuseMountInNodeStage(
|
||||
) error {
|
||||
// Check if there is anything to restore.
|
||||
|
||||
stagingTargetMs, err := getMountState(stagingTargetPath)
|
||||
stagingTargetMs, err := ns.getMountState(stagingTargetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -43,7 +43,9 @@ type NodeServer struct {
|
||||
*csicommon.DefaultNodeServer
|
||||
// A map storing all volumes with ongoing operations so that additional operations
|
||||
// for that same volume (as defined by VolumeID) return an Aborted error
|
||||
VolumeLocks *util.VolumeLocks
|
||||
VolumeLocks *util.VolumeLocks
|
||||
kernelMountOptions string
|
||||
fuseMountOptions string
|
||||
}
|
||||
|
||||
func getCredentialsForVolume(
|
||||
@ -174,7 +176,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
|
||||
}
|
||||
|
||||
isMnt, err := util.IsMountPoint(stagingTargetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -225,7 +227,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (*NodeServer) mount(
|
||||
func (ns *NodeServer) mount(
|
||||
ctx context.Context,
|
||||
mnt mounter.VolumeMounter,
|
||||
volOptions *store.VolumeOptions,
|
||||
@ -244,6 +246,13 @@ func (*NodeServer) mount(
|
||||
|
||||
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())
|
||||
|
||||
switch mnt.(type) {
|
||||
case *mounter.FuseMounter:
|
||||
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, ns.fuseMountOptions)
|
||||
case *mounter.KernelMounter:
|
||||
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, ns.kernelMountOptions)
|
||||
}
|
||||
|
||||
const readOnly = "ro"
|
||||
|
||||
if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
|
||||
@ -417,7 +426,7 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
// Ensure staging target path is a mountpoint.
|
||||
|
||||
if isMnt, err := util.IsMountPoint(stagingTargetPath); err != nil {
|
||||
if isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath); err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -429,7 +438,7 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
// Check if the volume is already mounted
|
||||
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -473,7 +482,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
|
||||
// considering kubelet make sure node operations like unpublish/unstage...etc can not be called
|
||||
// at same time, an explicit locking at time of nodeunpublish is not required.
|
||||
targetPath := req.GetTargetPath()
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -542,7 +551,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
isMnt, err := util.IsMountPoint(stagingTargetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -628,7 +637,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
|
||||
}
|
||||
|
||||
if stat.Mode().IsDir() {
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, targetPath)
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
|
||||
|
@ -24,12 +24,14 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// DefaultNodeServer stores driver object.
|
||||
type DefaultNodeServer struct {
|
||||
Driver *CSIDriver
|
||||
Type string
|
||||
Driver *CSIDriver
|
||||
Type string
|
||||
Mounter mount.Interface
|
||||
}
|
||||
|
||||
// NodeExpandVolume returns unimplemented response.
|
||||
|
@ -38,6 +38,7 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
func parseEndpoint(ep string) (string, string, error) {
|
||||
@ -61,8 +62,9 @@ func NewDefaultNodeServer(d *CSIDriver, t string, topology map[string]string) *D
|
||||
d.topology = topology
|
||||
|
||||
return &DefaultNodeServer{
|
||||
Driver: d,
|
||||
Type: t,
|
||||
Driver: d,
|
||||
Type: t,
|
||||
Mounter: mount.New(""),
|
||||
}
|
||||
}
|
||||
|
||||
@ -229,8 +231,12 @@ func panicHandler(
|
||||
// requested by the NodeGetVolumeStats CSI procedure.
|
||||
// It is shared for FileMode volumes, both the CephFS and RBD NodeServers call
|
||||
// this.
|
||||
func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
func FilesystemNodeGetVolumeStats(
|
||||
ctx context.Context,
|
||||
mounter mount.Interface,
|
||||
targetPath string,
|
||||
) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
isMnt, err := util.IsMountPoint(mounter, targetPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
var fakeID = "fake-id"
|
||||
@ -87,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {
|
||||
|
||||
// retry until a mountpoint is found
|
||||
for {
|
||||
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), cwd)
|
||||
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
|
||||
if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
|
||||
// try again with the parent directory
|
||||
cwd = filepath.Dir(cwd)
|
||||
|
@ -29,7 +29,6 @@ import (
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// Driver contains the default identity,node and controller struct.
|
||||
@ -73,11 +72,8 @@ func NewReplicationServer(c *rbd.ControllerServer) *rbd.ReplicationServer {
|
||||
|
||||
// NewNodeServer initialize a node server for rbd CSI driver.
|
||||
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) (*rbd.NodeServer, error) {
|
||||
mounter := mount.New("")
|
||||
|
||||
return &rbd.NodeServer{
|
||||
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
|
||||
Mounter: mounter,
|
||||
VolumeLocks: util.NewVolumeLocks(),
|
||||
}, nil
|
||||
}
|
||||
|
@ -42,7 +42,6 @@ import (
|
||||
// node server spec.
|
||||
type NodeServer struct {
|
||||
*csicommon.DefaultNodeServer
|
||||
Mounter mount.Interface
|
||||
// A map storing all volumes with ongoing operations so that additional operations
|
||||
// for that same volume (as defined by VolumeID) return an Aborted error
|
||||
VolumeLocks *util.VolumeLocks
|
||||
@ -806,7 +805,7 @@ func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *
|
||||
if readOnly {
|
||||
mountOptions = append(mountOptions, "ro")
|
||||
}
|
||||
if err := util.Mount(stagingPath, targetPath, fsType, mountOptions); err != nil {
|
||||
if err := util.Mount(ns.Mounter, stagingPath, targetPath, fsType, mountOptions); err != nil {
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
@ -1241,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
|
||||
}
|
||||
|
||||
if stat.Mode().IsDir() {
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, targetPath)
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
|
||||
} else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
|
||||
return blockNodeGetVolumeStats(ctx, targetPath)
|
||||
}
|
||||
|
@ -58,6 +58,22 @@ func RoundOffBytes(bytes int64) int64 {
|
||||
return num
|
||||
}
|
||||
|
||||
// RoundOffCephFSVolSize rounds up the bytes to 4MiB if the request is less
|
||||
// than 4MiB or if its greater it rounds up to multiple of 4MiB.
|
||||
func RoundOffCephFSVolSize(bytes int64) int64 {
|
||||
// Minimum supported size is 1MiB in CephCSI, if the request is <4MiB,
|
||||
// round off to 4MiB.
|
||||
if bytes < helpers.MiB {
|
||||
return 4 * helpers.MiB
|
||||
}
|
||||
|
||||
bytes /= helpers.MiB
|
||||
|
||||
bytes = int64(math.Ceil(float64(bytes)/4) * 4)
|
||||
|
||||
return RoundOffBytes(bytes * helpers.MiB)
|
||||
}
|
||||
|
||||
// variables which will be set during the build time.
|
||||
var (
|
||||
// GitCommit tell the latest git commit image is built from.
|
||||
@ -82,7 +98,12 @@ type Config struct {
|
||||
MetricsPath string // path of prometheus endpoint where metrics will be available
|
||||
HistogramOption string // Histogram option for grpc metrics, should be comma separated value,
|
||||
// ex:= "0.5,2,6" where start=0.5 factor=2, count=6
|
||||
MetricsIP string // TCP port for liveness/ metrics requests
|
||||
MetricsIP string // TCP port for liveness/ metrics requests
|
||||
|
||||
// mount option related flags
|
||||
KernelMountOptions string // Comma separated string of mount options accepted by cephfs kernel mounter
|
||||
FuseMountOptions string // Comma separated string of mount options accepted by ceph-fuse mounter
|
||||
|
||||
PidLimit int // PID limit to configure through cgroups")
|
||||
MetricsPort int // TCP port for liveness/grpc metrics requests
|
||||
PollTime time.Duration // time interval in seconds between each poll
|
||||
@ -304,9 +325,8 @@ func checkDirExists(p string) bool {
|
||||
}
|
||||
|
||||
// IsMountPoint checks if the given path is mountpoint or not.
|
||||
func IsMountPoint(p string) (bool, error) {
|
||||
dummyMount := mount.New("")
|
||||
notMnt, err := dummyMount.IsLikelyNotMountPoint(p)
|
||||
func IsMountPoint(mounter mount.Interface, p string) (bool, error) {
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(p)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -327,10 +347,8 @@ func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) {
|
||||
}
|
||||
|
||||
// Mount mounts the source to target path.
|
||||
func Mount(source, target, fstype string, options []string) error {
|
||||
dummyMount := mount.New("")
|
||||
|
||||
return dummyMount.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
|
||||
func Mount(mounter mount.Interface, source, target, fstype string, options []string) error {
|
||||
return mounter.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
|
||||
}
|
||||
|
||||
// MountOptionsAdd adds the `add` mount options to the `options` and returns a
|
||||
|
@ -352,3 +352,52 @@ func TestCheckKernelSupport(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundOffCephFSVolSize(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
size int64
|
||||
want int64
|
||||
}{
|
||||
{
|
||||
"1000kiB conversion",
|
||||
1000,
|
||||
4194304, // 4 MiB
|
||||
},
|
||||
{
|
||||
"1MiB conversions",
|
||||
1048576,
|
||||
4194304, // 4 MiB
|
||||
},
|
||||
{
|
||||
"1.5Mib conversion",
|
||||
1677722,
|
||||
4194304, // 4 MiB
|
||||
},
|
||||
{
|
||||
"1023MiB conversion",
|
||||
1072693248,
|
||||
1073741824, // 1024 MiB
|
||||
},
|
||||
{
|
||||
"1.5GiB conversion",
|
||||
1585446912,
|
||||
2147483648, // 2 GiB
|
||||
},
|
||||
{
|
||||
"1555MiB conversion",
|
||||
1630535680,
|
||||
2147483648, // 2 GiB
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
ts := tt
|
||||
t.Run(ts.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := RoundOffCephFSVolSize(ts.size); got != ts.want {
|
||||
t.Errorf("RoundOffCephFSVolSize() = %v, want %v", got, ts.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
57
vendor/github.com/IBM/keyprotect-go-client/README.md
generated
vendored
57
vendor/github.com/IBM/keyprotect-go-client/README.md
generated
vendored
@ -119,18 +119,18 @@ crkID := key.ID
|
||||
myDEK := []byte{"thisisadataencryptionkey"}
|
||||
// Do some encryption with myDEK
|
||||
// Wrap the DEK so we can safely store it
|
||||
wrappedDEK, err := client.Wrap(ctx, crkID, myDEK, nil)
|
||||
wrappedDEK, err := client.Wrap(ctx, crkIDOrAlias, myDEK, nil)
|
||||
|
||||
|
||||
// Unwrap the DEK
|
||||
dek, err := client.Unwrap(ctx, crkID, wrappedDEK, nil)
|
||||
dek, err := client.Unwrap(ctx, crkIDOrAlias, wrappedDEK, nil)
|
||||
// Do some encryption/decryption using the DEK
|
||||
// Discard the DEK
|
||||
dek = nil
|
||||
```
|
||||
|
||||
Note you can also pass additional authentication data (AAD) to wrap and unwrap calls
|
||||
to provide another level of protection for your DEK. The AAD is a string array with
|
||||
to provide another level of protection for your DEK. The AAD is a string array with
|
||||
each element up to 255 chars. For example:
|
||||
|
||||
```go
|
||||
@ -138,11 +138,11 @@ myAAD := []string{"First aad string", "second aad string", "third aad string"}
|
||||
myDEK := []byte{"thisisadataencryptionkey"}
|
||||
// Do some encryption with myDEK
|
||||
// Wrap the DEK so we can safely store it
|
||||
wrappedDEK, err := client.Wrap(ctx, crkID, myDEK, &myAAD)
|
||||
wrappedDEK, err := client.Wrap(ctx, crkIDOrAlias, myDEK, &myAAD)
|
||||
|
||||
|
||||
// Unwrap the DEK
|
||||
dek, err := client.Unwrap(ctx, crkID, wrappedDEK, &myAAD)
|
||||
dek, err := client.Unwrap(ctx, crkIDOrAlias, wrappedDEK, &myAAD)
|
||||
// Do some encryption/decryption using the DEK
|
||||
// Discard the DEK
|
||||
dek = nil
|
||||
@ -151,7 +151,7 @@ dek = nil
|
||||
Have key protect create a DEK for you:
|
||||
|
||||
```go
|
||||
dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkID, nil)
|
||||
dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkIDOrAlias, nil)
|
||||
// Do some encrypt/decrypt with the dek
|
||||
// Discard the DEK
|
||||
dek = nil
|
||||
@ -163,7 +163,7 @@ Can also specify AAD:
|
||||
|
||||
```go
|
||||
myAAD := []string{"First aad string", "second aad string", "third aad string"}
|
||||
dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkID, &myAAD)
|
||||
dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkIDOrAlias, &myAAD)
|
||||
// Do some encrypt/decrypt with the dek
|
||||
// Discard the DEK
|
||||
dek = nil
|
||||
@ -171,3 +171,46 @@ dek = nil
|
||||
// Save the wrapped DEK for later. Call Unwrap to use it, make
|
||||
// sure to specify the same AAD.
|
||||
```
|
||||
### Fetching List Key Versions With Parameters.
|
||||
|
||||
```go
|
||||
|
||||
limit := uint32(2)
|
||||
offset := uint32(0)
|
||||
totalCount := true
|
||||
|
||||
listkeyVersionsOptions := &kp.ListKeyVersionsOptions{
|
||||
Limit : &limit,
|
||||
Offset : &offset,
|
||||
TotalCount : &totalCount,
|
||||
}
|
||||
|
||||
keyVersions, err := client.ListKeyVersions(ctx, "key_id_or_alias", listkeyVersionsOptions)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
fmt.Println(keyVersions)
|
||||
```
|
||||
|
||||
### Fetching List Key With Parameters.
|
||||
|
||||
```go
|
||||
|
||||
limit := uint32(5)
|
||||
offset := uint32(0)
|
||||
extractable := false
|
||||
keyStates := []kp.KeyState{kp.KeyState(kp.Active), kp.KeyState(kp.Suspended)}
|
||||
|
||||
listKeysOptions := &kp.ListKeysOptions{
|
||||
Limit : &limit,
|
||||
Offset : &offset,
|
||||
Extractable : &extractable,
|
||||
State : keyStates,
|
||||
}
|
||||
|
||||
keys, err := client.ListKeys(ctx, listKeysOptions)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
fmt.Println(keys)
|
||||
```
|
8
vendor/github.com/IBM/keyprotect-go-client/import_token.go
generated
vendored
8
vendor/github.com/IBM/keyprotect-go-client/import_token.go
generated
vendored
@ -32,7 +32,13 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const importTokenEncAlgo = "RSAES_OAEP_SHA_256" // currently the only one supported
|
||||
// EncryptionAlgorithm represents the encryption algorithm used for key creation
|
||||
const (
|
||||
// AlgorithmRSAOAEP256 denotes RSA OAEP SHA 256 encryption, supported by KP
|
||||
AlgorithmRSAOAEP256 string = "RSAES_OAEP_SHA_256"
|
||||
// AlgorithmRSAOAEP1 denotes RSA OAEP SHA 1 encryption, supported by HPCS
|
||||
AlgorithmRSAOAEP1 string = "RSAES_OAEP_SHA_1"
|
||||
)
|
||||
|
||||
// ImportTokenCreateRequest represents request parameters for creating a
|
||||
// ImportToken.
|
||||
|
8
vendor/github.com/IBM/keyprotect-go-client/key_alias.go
generated
vendored
8
vendor/github.com/IBM/keyprotect-go-client/key_alias.go
generated
vendored
@ -33,9 +33,9 @@ type KeyAliases struct {
|
||||
// An alias name acts as an identifier just like key ID
|
||||
// For more information please refer to the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-key-alias#create-key-alias-api
|
||||
func (c *Client) CreateKeyAlias(ctx context.Context, aliasName, keyID string) (*KeyAlias, error) {
|
||||
func (c *Client) CreateKeyAlias(ctx context.Context, aliasName, idOrAlias string) (*KeyAlias, error) {
|
||||
|
||||
req, err := c.newRequest("POST", fmt.Sprintf(requestPath, keyID, aliasName), nil)
|
||||
req, err := c.newRequest("POST", fmt.Sprintf(requestPath, idOrAlias, aliasName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -56,9 +56,9 @@ func (c *Client) CreateKeyAlias(ctx context.Context, aliasName, keyID string) (*
|
||||
// DeleteKeyAlias deletes an alias name associated with a key
|
||||
// For more information please refer to the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-key-alias#delete-key-alias
|
||||
func (c *Client) DeleteKeyAlias(ctx context.Context, aliasName, keyID string) error {
|
||||
func (c *Client) DeleteKeyAlias(ctx context.Context, aliasName, idOrAlias string) error {
|
||||
|
||||
req, err := c.newRequest("DELETE", fmt.Sprintf(requestPath, keyID, aliasName), nil)
|
||||
req, err := c.newRequest("DELETE", fmt.Sprintf(requestPath, idOrAlias, aliasName), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
350
vendor/github.com/IBM/keyprotect-go-client/keys.go
generated
vendored
350
vendor/github.com/IBM/keyprotect-go-client/keys.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
||||
"log"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -38,6 +39,17 @@ var (
|
||||
// PreferReturn designates the value for the "Prefer" header.
|
||||
type PreferReturn int
|
||||
|
||||
type KeyState uint32
|
||||
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-key-states
|
||||
const (
|
||||
Active KeyState = iota + 1
|
||||
Suspended
|
||||
Deactivated
|
||||
_
|
||||
Destroyed
|
||||
)
|
||||
|
||||
// Key represents a key as returned by the KP API.
|
||||
type Key struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
@ -65,6 +77,9 @@ type Key struct {
|
||||
Deleted *bool `json:"deleted,omitempty"`
|
||||
DeletedBy *string `json:"deletedBy,omitempty"`
|
||||
DeletionDate *time.Time `json:"deletionDate,omitempty"`
|
||||
PurgeAllowed *bool `json:"purgeAllowed,omitempty"`
|
||||
PurgeAllowedFrom *time.Time `json:"purgeAllowedFrom,omitempty"`
|
||||
PurgeScheduledOn *time.Time `json:"purgeScheduledOn,omitempty"`
|
||||
DualAuthDelete *DualAuth `json:"dualAuthDelete,omitempty"`
|
||||
}
|
||||
|
||||
@ -80,13 +95,27 @@ type Keys struct {
|
||||
Keys []Key `json:"resources"`
|
||||
}
|
||||
|
||||
type KeyVersionsMetadata struct {
|
||||
CollectionType string `json:"collectionType"`
|
||||
CollectionTotal *uint32 `json:"collectionTotal"`
|
||||
TotalCount *uint32 `json:"totalCount,omitempty"`
|
||||
}
|
||||
|
||||
type KeyVersions struct {
|
||||
Metadata KeyVersionsMetadata `json:"metadata"`
|
||||
KeyVersion []KeyVersion `json:"resources"`
|
||||
}
|
||||
|
||||
// KeysActionRequest represents request parameters for a key action
|
||||
// API call.
|
||||
type KeysActionRequest struct {
|
||||
PlainText string `json:"plaintext,omitempty"`
|
||||
AAD []string `json:"aad,omitempty"`
|
||||
CipherText string `json:"ciphertext,omitempty"`
|
||||
Payload string `json:"payload,omitempty"`
|
||||
PlainText string `json:"plaintext,omitempty"`
|
||||
AAD []string `json:"aad,omitempty"`
|
||||
CipherText string `json:"ciphertext,omitempty"`
|
||||
Payload string `json:"payload,omitempty"`
|
||||
EncryptedNonce string `json:"encryptedNonce,omitempty"`
|
||||
IV string `json:"iv,omitempty"`
|
||||
EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"`
|
||||
}
|
||||
|
||||
type KeyVersion struct {
|
||||
@ -101,23 +130,14 @@ func (c *Client) CreateKey(ctx context.Context, name string, expiration *time.Ti
|
||||
|
||||
// CreateImportedKey creates a new KP key from the given key material.
|
||||
func (c *Client) CreateImportedKey(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool) (*Key, error) {
|
||||
key := Key{
|
||||
Name: name,
|
||||
Type: keyType,
|
||||
Extractable: extractable,
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
if payload != "" && encryptedNonce != "" && iv != "" {
|
||||
key.EncryptedNonce = encryptedNonce
|
||||
key.IV = iv
|
||||
key.EncryptionAlgorithm = importTokenEncAlgo
|
||||
}
|
||||
|
||||
if expiration != nil {
|
||||
key.Expiration = expiration
|
||||
}
|
||||
key := c.createKeyTemplate(ctx, name, expiration, payload, encryptedNonce, iv, extractable, nil, AlgorithmRSAOAEP256)
|
||||
return c.createKey(ctx, key)
|
||||
}
|
||||
|
||||
// CreateImportedKeyWithSHA1 creates a new KP key from the given key material
|
||||
// using RSAES OAEP SHA 1 as encryption algorithm.
|
||||
func (c *Client) CreateImportedKeyWithSHA1(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool, aliases []string) (*Key, error) {
|
||||
key := c.createKeyTemplate(ctx, name, expiration, payload, encryptedNonce, iv, extractable, aliases, AlgorithmRSAOAEP1)
|
||||
return c.createKey(ctx, key)
|
||||
}
|
||||
|
||||
@ -160,25 +180,33 @@ func (c *Client) CreateKeyWithAliases(ctx context.Context, name string, expirati
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-import-root-keys#import-root-key-api
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-import-standard-keys#import-standard-key-gui
|
||||
func (c *Client) CreateImportedKeyWithAliases(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool, aliases []string) (*Key, error) {
|
||||
key := c.createKeyTemplate(ctx, name, expiration, payload, encryptedNonce, iv, extractable, aliases, AlgorithmRSAOAEP256)
|
||||
return c.createKey(ctx, key)
|
||||
}
|
||||
|
||||
func (c *Client) createKeyTemplate(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool, aliases []string, encryptionAlgorithm string) Key {
|
||||
key := Key{
|
||||
Name: name,
|
||||
Type: keyType,
|
||||
Extractable: extractable,
|
||||
Payload: payload,
|
||||
Aliases: aliases,
|
||||
}
|
||||
|
||||
if aliases != nil {
|
||||
key.Aliases = aliases
|
||||
}
|
||||
|
||||
if !extractable && payload != "" && encryptedNonce != "" && iv != "" {
|
||||
key.EncryptedNonce = encryptedNonce
|
||||
key.IV = iv
|
||||
key.EncryptionAlgorithm = importTokenEncAlgo
|
||||
key.EncryptionAlgorithm = encryptionAlgorithm
|
||||
}
|
||||
|
||||
if expiration != nil {
|
||||
key.Expiration = expiration
|
||||
}
|
||||
|
||||
return c.createKey(ctx, key)
|
||||
return key
|
||||
}
|
||||
|
||||
func (c *Client) createKey(ctx context.Context, key Key) (*Key, error) {
|
||||
@ -203,6 +231,36 @@ func (c *Client) createKey(ctx context.Context, key Key) (*Key, error) {
|
||||
return &keysResponse.Keys[0], nil
|
||||
}
|
||||
|
||||
// SetKeyRing method transfers a key associated with one key ring to another key ring
|
||||
// For more information please refer to the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-grouping-keys#transfer-key-key-ring
|
||||
func (c *Client) SetKeyRing(ctx context.Context, idOrAlias, newKeyRingID string) (*Key, error) {
|
||||
if idOrAlias == "" {
|
||||
return nil, fmt.Errorf("Please provide a valid key ID or alias")
|
||||
}
|
||||
|
||||
if newKeyRingID == "" {
|
||||
return nil, fmt.Errorf("Please provide a valid key ring id")
|
||||
}
|
||||
|
||||
keyRingRequestBody := struct {
|
||||
KeyRingID string
|
||||
}{
|
||||
KeyRingID: newKeyRingID,
|
||||
}
|
||||
|
||||
req, err := c.newRequest("PATCH", fmt.Sprintf("keys/%s", idOrAlias), keyRingRequestBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response := Keys{}
|
||||
if _, err := c.do(ctx, req, &response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &response.Keys[0], nil
|
||||
}
|
||||
|
||||
// GetKeys retrieves a collection of keys that can be paged through.
|
||||
func (c *Client) GetKeys(ctx context.Context, limit int, offset int) (*Keys, error) {
|
||||
if limit == 0 {
|
||||
@ -228,6 +286,55 @@ func (c *Client) GetKeys(ctx context.Context, limit int, offset int) (*Keys, err
|
||||
return &keys, nil
|
||||
}
|
||||
|
||||
//ListKeysOptions struct to add the query parameters for the List Keys function
|
||||
type ListKeysOptions struct {
|
||||
Extractable *bool
|
||||
Limit *uint32
|
||||
Offset *uint32
|
||||
State []KeyState
|
||||
}
|
||||
|
||||
// ListKeys retrieves a list of keys that are stored in your Key Protect service instance.
|
||||
// https://cloud.ibm.com/apidocs/key-protect#getkeys
|
||||
func (c *Client) ListKeys(ctx context.Context, listKeysOptions *ListKeysOptions) (*Keys, error) {
|
||||
|
||||
req, err := c.newRequest("GET", "keys", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// extracting the query parameters and encoding the same in the request url
|
||||
if listKeysOptions != nil {
|
||||
values := req.URL.Query()
|
||||
if listKeysOptions.Limit != nil {
|
||||
values.Set("limit", fmt.Sprint(*listKeysOptions.Limit))
|
||||
}
|
||||
if listKeysOptions.Offset != nil {
|
||||
values.Set("offset", fmt.Sprint(*listKeysOptions.Offset))
|
||||
}
|
||||
if listKeysOptions.State != nil {
|
||||
var states []string
|
||||
for _, i := range listKeysOptions.State {
|
||||
states = append(states, strconv.Itoa(int(i)))
|
||||
}
|
||||
|
||||
values.Set("state", strings.Join(states, ","))
|
||||
}
|
||||
if listKeysOptions.Extractable != nil {
|
||||
values.Set("extractable", fmt.Sprint(*listKeysOptions.Extractable))
|
||||
}
|
||||
req.URL.RawQuery = values.Encode()
|
||||
}
|
||||
|
||||
keys := Keys{}
|
||||
_, err = c.do(ctx, req, &keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &keys, nil
|
||||
}
|
||||
|
||||
// GetKey retrieves a key by ID or alias name.
|
||||
// For more information on Key Alias please refer to the link below
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-retrieve-key
|
||||
@ -245,10 +352,10 @@ func (c *Client) GetKeyMetadata(ctx context.Context, idOrAlias string) (*Key, er
|
||||
return c.getKey(ctx, idOrAlias, "keys/%s/metadata")
|
||||
}
|
||||
|
||||
func (c *Client) getKey(ctx context.Context, id string, path string) (*Key, error) {
|
||||
func (c *Client) getKey(ctx context.Context, idOrAlias string, path string) (*Key, error) {
|
||||
keys := Keys{}
|
||||
|
||||
req, err := c.newRequest("GET", fmt.Sprintf(path, id), nil)
|
||||
req, err := c.newRequest("GET", fmt.Sprintf(path, idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -267,10 +374,51 @@ type ForceOpt struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// DeleteKey deletes a key resource by specifying the ID of the key.
|
||||
func (c *Client) DeleteKey(ctx context.Context, id string, prefer PreferReturn, callOpts ...CallOpt) (*Key, error) {
|
||||
// ListKeyVersionsOptions struct to add the query parameters for the ListKeyVersions function
|
||||
type ListKeyVersionsOptions struct {
|
||||
Limit *uint32
|
||||
Offset *uint32
|
||||
TotalCount *bool
|
||||
}
|
||||
|
||||
req, err := c.newRequest("DELETE", fmt.Sprintf("keys/%s", id), nil)
|
||||
// ListKeyVersions gets all the versions of the key resource by specifying ID of the key and/or optional parameters
|
||||
// https://cloud.ibm.com/apidocs/key-protect#getkeyversions
|
||||
func (c *Client) ListKeyVersions(ctx context.Context, idOrAlias string, listKeyVersionsOptions *ListKeyVersionsOptions) (*KeyVersions, error) {
|
||||
keyVersion := KeyVersions{}
|
||||
// forming the request
|
||||
req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/versions", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// extracting the query parameters and encoding the same in the request url
|
||||
if listKeyVersionsOptions != nil {
|
||||
values := req.URL.Query()
|
||||
if listKeyVersionsOptions.Limit != nil {
|
||||
values.Set("limit", fmt.Sprint(*listKeyVersionsOptions.Limit))
|
||||
}
|
||||
if listKeyVersionsOptions.Offset != nil {
|
||||
values.Set("offset", fmt.Sprint(*listKeyVersionsOptions.Offset))
|
||||
}
|
||||
if listKeyVersionsOptions.TotalCount != nil {
|
||||
values.Set("totalCount", fmt.Sprint(*listKeyVersionsOptions.TotalCount))
|
||||
}
|
||||
req.URL.RawQuery = values.Encode()
|
||||
}
|
||||
|
||||
//making a request
|
||||
_, err = c.do(ctx, req, &keyVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &keyVersion, nil
|
||||
}
|
||||
|
||||
// DeleteKey deletes a key resource by specifying the ID of the key.
|
||||
func (c *Client) DeleteKey(ctx context.Context, idOrAlias string, prefer PreferReturn, callOpts ...CallOpt) (*Key, error) {
|
||||
|
||||
req, err := c.newRequest("DELETE", fmt.Sprintf("keys/%s", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -301,12 +449,37 @@ func (c *Client) DeleteKey(ctx context.Context, id string, prefer PreferReturn,
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Purge key method shreds all the metadata and registrations associated with a key that has been
|
||||
// deleted. The purge operation is allowed to be performed on a key from 4 hours after its deletion
|
||||
// and its action is irreversible.
|
||||
// For more information please refer to the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-delete-keys#delete-keys-key-purge
|
||||
func (c *Client) PurgeKey(ctx context.Context, idOrAlias string, prefer PreferReturn) (*Key, error) {
|
||||
req, err := c.newRequest("DELETE", fmt.Sprintf("keys/%s/purge", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Prefer", preferHeaders[prefer])
|
||||
|
||||
keys := Keys{}
|
||||
_, err = c.do(ctx, req, &keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(keys.Keys) > 0 {
|
||||
return &keys.Keys[0], nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// RestoreKey method reverts a delete key status to active key
|
||||
// This method performs restore of any key from deleted state to active state.
|
||||
// For more information please refer to the link below:
|
||||
// https://cloud.ibm.com/dowcs/key-protect?topic=key-protect-restore-keys
|
||||
func (c *Client) RestoreKey(ctx context.Context, id string) (*Key, error) {
|
||||
req, err := c.newRequest("POST", fmt.Sprintf("keys/%s/restore", id), nil)
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-restore-keys
|
||||
func (c *Client) RestoreKey(ctx context.Context, idOrAlias string) (*Key, error) {
|
||||
req, err := c.newRequest("POST", fmt.Sprintf("keys/%s/restore", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -322,17 +495,17 @@ func (c *Client) RestoreKey(ctx context.Context, id string) (*Key, error) {
|
||||
}
|
||||
|
||||
// Wrap calls the wrap action with the given plain text.
|
||||
func (c *Client) Wrap(ctx context.Context, id string, plainText []byte, additionalAuthData *[]string) ([]byte, error) {
|
||||
_, ct, err := c.wrap(ctx, id, plainText, additionalAuthData)
|
||||
func (c *Client) Wrap(ctx context.Context, idOrAlias string, plainText []byte, additionalAuthData *[]string) ([]byte, error) {
|
||||
_, ct, err := c.wrap(ctx, idOrAlias, plainText, additionalAuthData)
|
||||
return ct, err
|
||||
}
|
||||
|
||||
// WrapCreateDEK calls the wrap action without plain text.
|
||||
func (c *Client) WrapCreateDEK(ctx context.Context, id string, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
return c.wrap(ctx, id, nil, additionalAuthData)
|
||||
func (c *Client) WrapCreateDEK(ctx context.Context, idOrAlias string, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
return c.wrap(ctx, idOrAlias, nil, additionalAuthData)
|
||||
}
|
||||
|
||||
func (c *Client) wrap(ctx context.Context, id string, plainText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
func (c *Client) wrap(ctx context.Context, idOrAlias string, plainText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
keysActionReq := &KeysActionRequest{}
|
||||
|
||||
if plainText != nil {
|
||||
@ -347,7 +520,7 @@ func (c *Client) wrap(ctx context.Context, id string, plainText []byte, addition
|
||||
keysActionReq.AAD = *additionalAuthData
|
||||
}
|
||||
|
||||
keysAction, err := c.doKeysAction(ctx, id, "wrap", keysActionReq)
|
||||
keysAction, err := c.doKeysAction(ctx, idOrAlias, "wrap", keysActionReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -359,8 +532,8 @@ func (c *Client) wrap(ctx context.Context, id string, plainText []byte, addition
|
||||
}
|
||||
|
||||
// Unwrap is deprecated since it returns only plaintext and doesn't know how to handle rotation.
|
||||
func (c *Client) Unwrap(ctx context.Context, id string, cipherText []byte, additionalAuthData *[]string) ([]byte, error) {
|
||||
plainText, _, err := c.UnwrapV2(ctx, id, cipherText, additionalAuthData)
|
||||
func (c *Client) Unwrap(ctx context.Context, idOrAlias string, cipherText []byte, additionalAuthData *[]string) ([]byte, error) {
|
||||
plainText, _, err := c.UnwrapV2(ctx, idOrAlias, cipherText, additionalAuthData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -368,7 +541,7 @@ func (c *Client) Unwrap(ctx context.Context, id string, cipherText []byte, addit
|
||||
}
|
||||
|
||||
// Unwrap with rotation support.
|
||||
func (c *Client) UnwrapV2(ctx context.Context, id string, cipherText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
func (c *Client) UnwrapV2(ctx context.Context, idOrAlias string, cipherText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
|
||||
|
||||
keysAction := &KeysActionRequest{
|
||||
CipherText: string(cipherText),
|
||||
@ -378,7 +551,7 @@ func (c *Client) UnwrapV2(ctx context.Context, id string, cipherText []byte, add
|
||||
keysAction.AAD = *additionalAuthData
|
||||
}
|
||||
|
||||
respAction, err := c.doKeysAction(ctx, id, "unwrap", keysAction)
|
||||
respAction, err := c.doKeysAction(ctx, idOrAlias, "unwrap", keysAction)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -390,13 +563,13 @@ func (c *Client) UnwrapV2(ctx context.Context, id string, cipherText []byte, add
|
||||
}
|
||||
|
||||
// Rotate rotates a CRK.
|
||||
func (c *Client) Rotate(ctx context.Context, id, payload string) error {
|
||||
func (c *Client) Rotate(ctx context.Context, idOrAlias, payload string) error {
|
||||
|
||||
actionReq := &KeysActionRequest{
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
_, err := c.doKeysAction(ctx, id, "rotate", actionReq)
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "rotate", actionReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -404,12 +577,78 @@ func (c *Client) Rotate(ctx context.Context, id, payload string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type KeyPayload struct {
|
||||
payload string
|
||||
encryptedNonce string
|
||||
iv string
|
||||
encryptionAlgorithm string
|
||||
}
|
||||
|
||||
func NewKeyPayload(payload, encryptedNonce, iv string) KeyPayload {
|
||||
kp := KeyPayload{
|
||||
payload: payload,
|
||||
encryptedNonce: encryptedNonce,
|
||||
iv: iv,
|
||||
}
|
||||
return kp
|
||||
}
|
||||
|
||||
// EncryptWithRSA256 sets the encryption algorithm for key create to RSAES_OAEP_SHA_256
|
||||
// This is the default algorithm for key creation under Key Protect service
|
||||
func (kp KeyPayload) WithRSA256() KeyPayload {
|
||||
kp.encryptionAlgorithm = "RSAES_OAEP_SHA_256"
|
||||
return kp
|
||||
}
|
||||
|
||||
// EncryptWithRSA1 sets the encryption algorithm for key create to RSAES_OAEP_SHA_1
|
||||
// This algorithm is only supported by the Hyper Protect(HPCS) service
|
||||
func (kp KeyPayload) WithRSA1() KeyPayload {
|
||||
kp.encryptionAlgorithm = "RSAES_OAEP_SHA_1"
|
||||
return kp
|
||||
}
|
||||
|
||||
// RotateV2 methods supports rotation of a root key with or without payload and also rotate a
|
||||
// securely imported root key.
|
||||
func (c *Client) RotateV2(ctx context.Context, idOrAlias string, new_key *KeyPayload) error {
|
||||
var actionReq *KeysActionRequest
|
||||
if new_key != nil {
|
||||
actionReq = &KeysActionRequest{
|
||||
Payload: new_key.payload,
|
||||
EncryptedNonce: new_key.encryptedNonce,
|
||||
IV: new_key.iv,
|
||||
EncryptionAlgorithm: new_key.encryptionAlgorithm,
|
||||
}
|
||||
}
|
||||
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "rotate", actionReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncAssociatedResources method executes the sync request which verifies and updates
|
||||
// the resources associated with the key.
|
||||
// For more information please refer to the link below
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-sync-associated-resources
|
||||
func (c *Client) SyncAssociatedResources(ctx context.Context, idOrAlias string) error {
|
||||
req, err := c.newRequest("POST", fmt.Sprintf("keys/%s/actions/sync", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = c.do(ctx, req, nil)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Disable a key. The key will not be deleted but it will not be active
|
||||
// and key operations cannot be performed on a disabled key.
|
||||
// For more information can refer to the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-disable-keys
|
||||
func (c *Client) DisableKey(ctx context.Context, id string) error {
|
||||
_, err := c.doKeysAction(ctx, id, "disable", nil)
|
||||
func (c *Client) DisableKey(ctx context.Context, idOrAlias string) error {
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "disable", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -418,8 +657,8 @@ func (c *Client) DisableKey(ctx context.Context, id string) error {
|
||||
// Note: This does not recover Deleted keys.
|
||||
// For more information can refer to the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-disable-keys#enable-api
|
||||
func (c *Client) EnableKey(ctx context.Context, id string) error {
|
||||
_, err := c.doKeysAction(ctx, id, "enable", nil)
|
||||
func (c *Client) EnableKey(ctx context.Context, idOrAlias string) error {
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "enable", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -427,8 +666,8 @@ func (c *Client) EnableKey(ctx context.Context, id string) error {
|
||||
// After the key is set to deletion it can be deleted by another user who has Manager access.
|
||||
// For more information refer to the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-delete-dual-auth-keys#set-key-deletion-api
|
||||
func (c *Client) InitiateDualAuthDelete(ctx context.Context, id string) error {
|
||||
_, err := c.doKeysAction(ctx, id, "setKeyForDeletion", nil)
|
||||
func (c *Client) InitiateDualAuthDelete(ctx context.Context, idOrAlias string) error {
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "setKeyForDeletion", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -436,25 +675,20 @@ func (c *Client) InitiateDualAuthDelete(ctx context.Context, id string) error {
|
||||
// be prevented from getting deleted by unsetting the key for deletion.
|
||||
// For more information refer to the Key Protect docs in the link below:
|
||||
//https://cloud.ibm.com/docs/key-protect?topic=key-protect-delete-dual-auth-keys#unset-key-deletion-api
|
||||
func (c *Client) CancelDualAuthDelete(ctx context.Context, id string) error {
|
||||
_, err := c.doKeysAction(ctx, id, "unsetKeyForDeletion", nil)
|
||||
func (c *Client) CancelDualAuthDelete(ctx context.Context, idOrAlias string) error {
|
||||
_, err := c.doKeysAction(ctx, idOrAlias, "unsetKeyForDeletion", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// doKeysAction calls the KP Client to perform an action on a key.
|
||||
func (c *Client) doKeysAction(ctx context.Context, id string, action string, keysActionReq *KeysActionRequest) (*KeysActionRequest, error) {
|
||||
func (c *Client) doKeysAction(ctx context.Context, idOrAlias string, action string, keysActionReq *KeysActionRequest) (*KeysActionRequest, error) {
|
||||
keyActionRsp := KeysActionRequest{}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("action", action)
|
||||
|
||||
req, err := c.newRequest("POST", fmt.Sprintf("keys/%s", id), keysActionReq)
|
||||
req, err := c.newRequest("POST", fmt.Sprintf("keys/%s/actions/%s", idOrAlias, action), keysActionReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.URL.RawQuery = v.Encode()
|
||||
|
||||
_, err = c.do(ctx, req, &keyActionRsp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
44
vendor/github.com/IBM/keyprotect-go-client/policy.go
generated
vendored
44
vendor/github.com/IBM/keyprotect-go-client/policy.go
generated
vendored
@ -59,15 +59,15 @@ type Policies struct {
|
||||
Policies []Policy `json:"resources"`
|
||||
}
|
||||
|
||||
// GetPolicy retrieves a policy by Key ID. This function is
|
||||
// GetPolicy retrieves a policy by Key ID or alias. This function is
|
||||
// deprecated, as it only returns one policy and does not let you
|
||||
// select which policy set it will return. It is kept for backward
|
||||
// compatibility on keys with only one rotation policy. Please update
|
||||
// to use the new GetPolicies or Get<type>Policy functions.
|
||||
func (c *Client) GetPolicy(ctx context.Context, id string) (*Policy, error) {
|
||||
func (c *Client) GetPolicy(ctx context.Context, idOrAlias string) (*Policy, error) {
|
||||
policyresponse := Policies{}
|
||||
|
||||
req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", id), nil)
|
||||
req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -84,7 +84,7 @@ func (c *Client) GetPolicy(ctx context.Context, id string) (*Policy, error) {
|
||||
// the rotation interval needed. This function is deprecated as it will only
|
||||
// let you set key rotation policies. To set dual auth and other newer policies
|
||||
// on a key, please use the new SetPolicies of Set<type>Policy functions.
|
||||
func (c *Client) SetPolicy(ctx context.Context, id string, prefer PreferReturn, rotationInterval int) (*Policy, error) {
|
||||
func (c *Client) SetPolicy(ctx context.Context, idOrAlias string, prefer PreferReturn, rotationInterval int) (*Policy, error) {
|
||||
|
||||
policy := Policy{
|
||||
Type: policyType,
|
||||
@ -103,7 +103,7 @@ func (c *Client) SetPolicy(ctx context.Context, id string, prefer PreferReturn,
|
||||
|
||||
policyresponse := Policies{}
|
||||
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", idOrAlias), &policyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -118,11 +118,11 @@ func (c *Client) SetPolicy(ctx context.Context, id string, prefer PreferReturn,
|
||||
return &policyresponse.Policies[0], nil
|
||||
}
|
||||
|
||||
// GetPolicies retrieves all policies details associated with a Key ID.
|
||||
func (c *Client) GetPolicies(ctx context.Context, id string) ([]Policy, error) {
|
||||
// GetPolicies retrieves all policies details associated with a Key ID or alias.
|
||||
func (c *Client) GetPolicies(ctx context.Context, idOrAlias string) ([]Policy, error) {
|
||||
policyresponse := Policies{}
|
||||
|
||||
req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", id), nil)
|
||||
req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", idOrAlias), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -155,10 +155,10 @@ func (c *Client) getPolicy(ctx context.Context, id, policyType string, policyres
|
||||
// GetRotationPolivy method retrieves rotation policy details of a key
|
||||
// For more information can refet the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-rotation-policy#view-rotation-policy-api
|
||||
func (c *Client) GetRotationPolicy(ctx context.Context, id string) (*Policy, error) {
|
||||
func (c *Client) GetRotationPolicy(ctx context.Context, idOrAlias string) (*Policy, error) {
|
||||
policyresponse := Policies{}
|
||||
|
||||
err := c.getPolicy(ctx, id, RotationPolicy, &policyresponse)
|
||||
err := c.getPolicy(ctx, idOrAlias, RotationPolicy, &policyresponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -173,10 +173,10 @@ func (c *Client) GetRotationPolicy(ctx context.Context, id string) (*Policy, err
|
||||
// GetDualAuthDeletePolicy method retrieves dual auth delete policy details of a key
|
||||
// For more information can refer the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-dual-auth-key-policy#view-dual-auth-key-policy-api
|
||||
func (c *Client) GetDualAuthDeletePolicy(ctx context.Context, id string) (*Policy, error) {
|
||||
func (c *Client) GetDualAuthDeletePolicy(ctx context.Context, idOrAlias string) (*Policy, error) {
|
||||
policyresponse := Policies{}
|
||||
|
||||
err := c.getPolicy(ctx, id, DualAuthDelete, &policyresponse)
|
||||
err := c.getPolicy(ctx, idOrAlias, DualAuthDelete, &policyresponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -188,10 +188,10 @@ func (c *Client) GetDualAuthDeletePolicy(ctx context.Context, id string) (*Polic
|
||||
return &policyresponse.Policies[0], nil
|
||||
}
|
||||
|
||||
func (c *Client) setPolicy(ctx context.Context, id, policyType string, policyRequest Policies) (*Policies, error) {
|
||||
func (c *Client) setPolicy(ctx context.Context, idOrAlias, policyType string, policyRequest Policies) (*Policies, error) {
|
||||
policyresponse := Policies{}
|
||||
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", idOrAlias), &policyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -207,10 +207,10 @@ func (c *Client) setPolicy(ctx context.Context, id, policyType string, policyReq
|
||||
return &policyresponse, nil
|
||||
}
|
||||
|
||||
// SetRotationPolicy updates the rotation policy associated with a key by specifying key ID and rotation interval.
|
||||
// SetRotationPolicy updates the rotation policy associated with a key by specifying key ID or alias and rotation interval.
|
||||
// For more information can refer the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-rotation-policy#update-rotation-policy-api
|
||||
func (c *Client) SetRotationPolicy(ctx context.Context, id string, rotationInterval int) (*Policy, error) {
|
||||
func (c *Client) SetRotationPolicy(ctx context.Context, idOrAlias string, rotationInterval int) (*Policy, error) {
|
||||
policy := Policy{
|
||||
Type: policyType,
|
||||
Rotation: &Rotation{
|
||||
@ -226,7 +226,7 @@ func (c *Client) SetRotationPolicy(ctx context.Context, id string, rotationInter
|
||||
Policies: []Policy{policy},
|
||||
}
|
||||
|
||||
policyresponse, err := c.setPolicy(ctx, id, RotationPolicy, policyRequest)
|
||||
policyresponse, err := c.setPolicy(ctx, idOrAlias, RotationPolicy, policyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -238,10 +238,10 @@ func (c *Client) SetRotationPolicy(ctx context.Context, id string, rotationInter
|
||||
return &policyresponse.Policies[0], nil
|
||||
}
|
||||
|
||||
// SetDualAuthDeletePolicy updates the dual auth delete policy by passing the key ID and enable detail
|
||||
// SetDualAuthDeletePolicy updates the dual auth delete policy by passing the key ID or alias and enable detail
|
||||
// For more information can refer the Key Protect docs in the link below:
|
||||
// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-dual-auth-key-policy#create-dual-auth-key-policy-api
|
||||
func (c *Client) SetDualAuthDeletePolicy(ctx context.Context, id string, enabled bool) (*Policy, error) {
|
||||
func (c *Client) SetDualAuthDeletePolicy(ctx context.Context, idOrAlias string, enabled bool) (*Policy, error) {
|
||||
policy := Policy{
|
||||
Type: policyType,
|
||||
DualAuth: &DualAuth{
|
||||
@ -257,7 +257,7 @@ func (c *Client) SetDualAuthDeletePolicy(ctx context.Context, id string, enabled
|
||||
Policies: []Policy{policy},
|
||||
}
|
||||
|
||||
policyresponse, err := c.setPolicy(ctx, id, DualAuthDelete, policyRequest)
|
||||
policyresponse, err := c.setPolicy(ctx, idOrAlias, DualAuthDelete, policyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -273,7 +273,7 @@ func (c *Client) SetDualAuthDeletePolicy(ctx context.Context, id string, enabled
|
||||
// To set rotation policy for the key pass the setRotationPolicy parameter as true and set the rotationInterval detail.
|
||||
// To set dual auth delete policy for the key pass the setDualAuthDeletePolicy parameter as true and set the dualAuthEnable detail.
|
||||
// Both the policies can be set or either of the policies can be set.
|
||||
func (c *Client) SetPolicies(ctx context.Context, id string, setRotationPolicy bool, rotationInterval int, setDualAuthDeletePolicy, dualAuthEnable bool) ([]Policy, error) {
|
||||
func (c *Client) SetPolicies(ctx context.Context, idOrAlias string, setRotationPolicy bool, rotationInterval int, setDualAuthDeletePolicy, dualAuthEnable bool) ([]Policy, error) {
|
||||
policies := []Policy{}
|
||||
if setRotationPolicy {
|
||||
rotationPolicy := Policy{
|
||||
@ -304,7 +304,7 @@ func (c *Client) SetPolicies(ctx context.Context, id string, setRotationPolicy b
|
||||
|
||||
policyresponse := Policies{}
|
||||
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
|
||||
req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", idOrAlias), &policyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
2
vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
|
||||
package aws
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.16.5"
|
||||
const goModuleVersion = "1.16.7"
|
||||
|
2
vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
generated
vendored
@ -90,7 +90,7 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn
|
||||
out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
|
||||
attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
|
||||
|
||||
// AttempResult Retried states that the attempt was not successful, and
|
||||
// AttemptResult Retried states that the attempt was not successful, and
|
||||
// should be retried.
|
||||
shouldRetry := attemptResult.Retried
|
||||
|
||||
|
6
vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
generated
vendored
@ -407,8 +407,8 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
|
||||
headers = append(headers, hostHeader)
|
||||
signed[hostHeader] = append(signed[hostHeader], host)
|
||||
|
||||
const contentLengthHeader = "content-length"
|
||||
if length > 0 {
|
||||
const contentLengthHeader = "content-length"
|
||||
headers = append(headers, contentLengthHeader)
|
||||
signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
|
||||
}
|
||||
@ -417,6 +417,10 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
|
||||
if !rule.IsValid(k) {
|
||||
continue // ignored header
|
||||
}
|
||||
if strings.EqualFold(k, contentLengthHeader) {
|
||||
// prevent signing already handled content-length header.
|
||||
continue
|
||||
}
|
||||
|
||||
lowerCaseKey := strings.ToLower(k)
|
||||
if _, ok := signed[lowerCaseKey]; ok {
|
||||
|
8
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
generated
vendored
@ -1,3 +1,11 @@
|
||||
# v1.1.14 (2022-07-05)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.1.13 (2022-06-29)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.1.12 (2022-06-07)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
2
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
|
||||
package configsources
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.1.12"
|
||||
const goModuleVersion = "1.1.14"
|
||||
|
8
vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
generated
vendored
@ -1,3 +1,11 @@
|
||||
# v2.4.8 (2022-07-05)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v2.4.7 (2022-06-29)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v2.4.6 (2022-06-07)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
2
vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
|
||||
package endpoints
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "2.4.6"
|
||||
const goModuleVersion = "2.4.8"
|
||||
|
8
vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
generated
vendored
@ -1,3 +1,11 @@
|
||||
# v1.9.8 (2022-07-05)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.9.7 (2022-06-29)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.9.6 (2022-06-07)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
@ -3,4 +3,4 @@
|
||||
package presignedurl
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.9.6"
|
||||
const goModuleVersion = "1.9.8"
|
||||
|
8
vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
generated
vendored
@ -1,3 +1,11 @@
|
||||
# v1.16.9 (2022-07-05)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.16.8 (2022-06-29)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.16.7 (2022-06-07)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
3
vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
generated
vendored
3
vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
generated
vendored
@ -512,6 +512,9 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
|
||||
return err
|
||||
}
|
||||
// convert request to a GET request
|
||||
err = query.AddAsGetRequestMiddleware(stack)
|
||||
if err != nil {
|
||||
|
2
vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
|
||||
package sts
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.16.7"
|
||||
const goModuleVersion = "1.16.9"
|
||||
|
6
vendor/github.com/aws/smithy-go/CHANGELOG.md
generated
vendored
6
vendor/github.com/aws/smithy-go/CHANGELOG.md
generated
vendored
@ -1,3 +1,9 @@
|
||||
# Release (v1.12.0)
|
||||
|
||||
## Module Highlights
|
||||
* `github.com/aws/smithy-go`: v1.12.0
|
||||
* **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value.
|
||||
|
||||
# Release (v1.11.3)
|
||||
|
||||
## Module Highlights
|
||||
|
2
vendor/github.com/aws/smithy-go/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/smithy-go/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
|
||||
package smithy
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.11.3"
|
||||
const goModuleVersion = "1.12.0"
|
||||
|
79
vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
generated
vendored
79
vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
generated
vendored
@ -7,6 +7,85 @@ import (
|
||||
"github.com/aws/smithy-go/middleware"
|
||||
)
|
||||
|
||||
type isContentTypeAutoSet struct{}
|
||||
|
||||
// SetIsContentTypeDefaultValue returns a Context specifying if the request's
|
||||
// content-type header was set to a default value.
|
||||
func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context {
|
||||
return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault)
|
||||
}
|
||||
|
||||
// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the
|
||||
// request is a default value that was auto assigned by an operation
|
||||
// serializer. Allows middleware post serialization to know if the content-type
|
||||
// was auto set to a default value or not.
|
||||
//
|
||||
// Also returns false if the Context value was never updated to include if
|
||||
// content-type was set to a default value.
|
||||
func GetIsContentTypeDefaultValue(ctx context.Context) bool {
|
||||
v, _ := ctx.Value(isContentTypeAutoSet{}).(bool)
|
||||
return v
|
||||
}
|
||||
|
||||
// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover
|
||||
// middleware to the stack after the operation serializer. This middleware will
|
||||
// remove the content-type header from the request if it was set as a default
|
||||
// value, and no request payload is present.
|
||||
//
|
||||
// Returns error if unable to add the middleware.
|
||||
func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
|
||||
err = stack.Serialize.Insert(removeDefaultContentType{},
|
||||
"OperationSerializer", middleware.After)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add %s serialize middleware, %w",
|
||||
removeDefaultContentType{}.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveNoPayloadDefaultContentTypeRemover removes the
|
||||
// DefaultContentTypeRemover middleware from the stack. Returns an error if
|
||||
// unable to remove the middleware.
|
||||
func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
|
||||
_, err = stack.Serialize.Remove(removeDefaultContentType{}.ID())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove %s serialize middleware, %w",
|
||||
removeDefaultContentType{}.ID(), err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeDefaultContentType provides after serialization middleware that will
|
||||
// remove the content-type header from an HTTP request if the header was set as
|
||||
// a default value by the operation serializer, and there is no request payload.
|
||||
type removeDefaultContentType struct{}
|
||||
|
||||
// ID returns the middleware ID
|
||||
func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" }
|
||||
|
||||
// HandleSerialize implements the serialization middleware.
|
||||
func (removeDefaultContentType) HandleSerialize(
|
||||
ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
|
||||
) (
|
||||
out middleware.SerializeOutput, meta middleware.Metadata, err error,
|
||||
) {
|
||||
req, ok := input.Request.(*Request)
|
||||
if !ok {
|
||||
return out, meta, fmt.Errorf(
|
||||
"unexpected request type %T for removeDefaultContentType middleware",
|
||||
input.Request)
|
||||
}
|
||||
|
||||
if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil {
|
||||
req.Header.Del("Content-Type")
|
||||
input.Request = req
|
||||
}
|
||||
|
||||
return next.HandleSerialize(ctx, input)
|
||||
}
|
||||
|
||||
type headerValue struct {
|
||||
header string
|
||||
value string
|
||||
|
4
vendor/github.com/google/uuid/hash.go
generated
vendored
4
vendor/github.com/google/uuid/hash.go
generated
vendored
@ -26,8 +26,8 @@ var (
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space[:])
|
||||
h.Write(data)
|
||||
h.Write(space[:]) //nolint:errcheck
|
||||
h.Write(data) //nolint:errcheck
|
||||
s := h.Sum(nil)
|
||||
var uuid UUID
|
||||
copy(uuid[:], s)
|
||||
|
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
// Copyright 2021 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var jsonNull = []byte("null")
|
||||
|
||||
// NullUUID represents a UUID that may be null.
|
||||
// NullUUID implements the SQL driver.Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var u uuid.NullUUID
|
||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
||||
// ...
|
||||
// if u.Valid {
|
||||
// // use u.UUID
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
type NullUUID struct {
|
||||
UUID UUID
|
||||
Valid bool // Valid is true if UUID is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the SQL driver.Scanner interface.
|
||||
func (nu *NullUUID) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
nu.UUID, nu.Valid = Nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
err := nu.UUID.Scan(value)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nu NullUUID) Value() (driver.Value, error) {
|
||||
if !nu.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
// Delegate to UUID Value function
|
||||
return nu.UUID.Value()
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID[:], nil
|
||||
}
|
||||
|
||||
return []byte(nil), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(nu.UUID[:], data)
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID.MarshalText()
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
nu.UUID = id
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return json.Marshal(nu.UUID)
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
||||
if bytes.Equal(data, jsonNull) {
|
||||
*nu = NullUUID{}
|
||||
return nil // valid null UUID
|
||||
}
|
||||
err := json.Unmarshal(data, &nu.UUID)
|
||||
nu.Valid = err == nil
|
||||
return err
|
||||
}
|
2
vendor/github.com/google/uuid/sql.go
generated
vendored
2
vendor/github.com/google/uuid/sql.go
generated
vendored
@ -9,7 +9,7 @@ import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
|
55
vendor/github.com/google/uuid/uuid.go
generated
vendored
55
vendor/github.com/google/uuid/uuid.go
generated
vendored
@ -12,6 +12,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
@ -33,7 +34,27 @@ const (
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
var rander = rand.Reader // random function
|
||||
const randPoolSize = 16 * 16
|
||||
|
||||
var (
|
||||
rander = rand.Reader // random function
|
||||
poolEnabled = false
|
||||
poolMu sync.Mutex
|
||||
poolPos = randPoolSize // protected with poolMu
|
||||
pool [randPoolSize]byte // protected with poolMu
|
||||
)
|
||||
|
||||
type invalidLengthError struct{ len int }
|
||||
|
||||
func (err invalidLengthError) Error() string {
|
||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
||||
}
|
||||
|
||||
// IsInvalidLengthError is matcher function for custom error invalidLengthError
|
||||
func IsInvalidLengthError(err error) bool {
|
||||
_, ok := err.(invalidLengthError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
|
||||
return uuid, invalidLengthError{len(s)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
|
||||
return uuid, invalidLengthError{len(b)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
|
||||
}
|
||||
rander = r
|
||||
}
|
||||
|
||||
// EnableRandPool enables internal randomness pool used for Random
|
||||
// (Version 4) UUID generation. The pool contains random bytes read from
|
||||
// the random number generator on demand in batches. Enabling the pool
|
||||
// may improve the UUID generation throughput significantly.
|
||||
//
|
||||
// Since the pool is stored on the Go heap, this feature may be a bad fit
|
||||
// for security sensitive applications.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func EnableRandPool() {
|
||||
poolEnabled = true
|
||||
}
|
||||
|
||||
// DisableRandPool disables the randomness pool if it was previously
|
||||
// enabled with EnableRandPool.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func DisableRandPool() {
|
||||
poolEnabled = false
|
||||
defer poolMu.Unlock()
|
||||
poolMu.Lock()
|
||||
poolPos = randPoolSize
|
||||
}
|
||||
|
35
vendor/github.com/google/uuid/version4.go
generated
vendored
35
vendor/github.com/google/uuid/version4.go
generated
vendored
@ -14,11 +14,21 @@ func New() UUID {
|
||||
return Must(NewRandom())
|
||||
}
|
||||
|
||||
// NewString creates a new random UUID and returns it as a string or panics.
|
||||
// NewString is equivalent to the expression
|
||||
//
|
||||
// uuid.New().String()
|
||||
func NewString() string {
|
||||
return Must(NewRandom()).String()
|
||||
}
|
||||
|
||||
// NewRandom returns a Random (Version 4) UUID.
|
||||
//
|
||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
||||
// package.
|
||||
//
|
||||
// Uses the randomness pool if it was enabled with EnableRandPool.
|
||||
//
|
||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
||||
//
|
||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
||||
@ -27,7 +37,10 @@ func New() UUID {
|
||||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
||||
// year and having one duplicate.
|
||||
func NewRandom() (UUID, error) {
|
||||
return NewRandomFromReader(rander)
|
||||
if !poolEnabled {
|
||||
return NewRandomFromReader(rander)
|
||||
}
|
||||
return newRandomFromPool()
|
||||
}
|
||||
|
||||
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
|
||||
@ -41,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
func newRandomFromPool() (UUID, error) {
|
||||
var uuid UUID
|
||||
poolMu.Lock()
|
||||
if poolPos == randPoolSize {
|
||||
_, err := io.ReadFull(rander, pool[:])
|
||||
if err != nil {
|
||||
poolMu.Unlock()
|
||||
return Nil, err
|
||||
}
|
||||
poolPos = 0
|
||||
}
|
||||
copy(uuid[:], pool[poolPos:(poolPos+16)])
|
||||
poolPos += 16
|
||||
poolMu.Unlock()
|
||||
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
|
12
vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
generated
vendored
12
vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
generated
vendored
@ -1,12 +0,0 @@
|
||||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12.4
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make updatedeps test
|
1
vendor/github.com/hashicorp/go-retryablehttp/README.md
generated
vendored
1
vendor/github.com/hashicorp/go-retryablehttp/README.md
generated
vendored
@ -26,6 +26,7 @@ fails so that the full request can be attempted again. See the
|
||||
details.
|
||||
|
||||
Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required.
|
||||
From 0.6.7 onward, Go 1.13+ is required.
|
||||
|
||||
Example Use
|
||||
===========
|
||||
|
157
vendor/github.com/hashicorp/go-retryablehttp/client.go
generated
vendored
157
vendor/github.com/hashicorp/go-retryablehttp/client.go
generated
vendored
@ -35,11 +35,12 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -276,12 +277,16 @@ type Logger interface {
|
||||
Printf(string, ...interface{})
|
||||
}
|
||||
|
||||
// LeveledLogger interface implements the basic methods that a logger library needs
|
||||
// LeveledLogger is an interface that can be implemented by any logger or a
|
||||
// logger wrapper to provide leveled logging. The methods accept a message
|
||||
// string and a variadic number of key-value pairs. For log.Printf style
|
||||
// formatting where message string contains a format specifier, use Logger
|
||||
// interface.
|
||||
type LeveledLogger interface {
|
||||
Error(string, ...interface{})
|
||||
Info(string, ...interface{})
|
||||
Debug(string, ...interface{})
|
||||
Warn(string, ...interface{})
|
||||
Error(msg string, keysAndValues ...interface{})
|
||||
Info(msg string, keysAndValues ...interface{})
|
||||
Debug(msg string, keysAndValues ...interface{})
|
||||
Warn(msg string, keysAndValues ...interface{})
|
||||
}
|
||||
|
||||
// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions
|
||||
@ -357,6 +362,7 @@ type Client struct {
|
||||
ErrorHandler ErrorHandler
|
||||
|
||||
loggerInit sync.Once
|
||||
clientInit sync.Once
|
||||
}
|
||||
|
||||
// NewClient creates a new Client with default settings.
|
||||
@ -398,21 +404,39 @@ func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bo
|
||||
return false, ctx.Err()
|
||||
}
|
||||
|
||||
// don't propagate other errors
|
||||
shouldRetry, _ := baseRetryPolicy(resp, err)
|
||||
return shouldRetry, nil
|
||||
}
|
||||
|
||||
// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it
|
||||
// propagates errors back instead of returning nil. This allows you to inspect
|
||||
// why it decided to retry or not.
|
||||
func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
// do not retry on context.Canceled or context.DeadlineExceeded
|
||||
if ctx.Err() != nil {
|
||||
return false, ctx.Err()
|
||||
}
|
||||
|
||||
return baseRetryPolicy(resp, err)
|
||||
}
|
||||
|
||||
func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
if err != nil {
|
||||
if v, ok := err.(*url.Error); ok {
|
||||
// Don't retry if the error was due to too many redirects.
|
||||
if redirectsErrorRe.MatchString(v.Error()) {
|
||||
return false, nil
|
||||
return false, v
|
||||
}
|
||||
|
||||
// Don't retry if the error was due to an invalid protocol scheme.
|
||||
if schemeErrorRe.MatchString(v.Error()) {
|
||||
return false, nil
|
||||
return false, v
|
||||
}
|
||||
|
||||
// Don't retry if the error was due to TLS cert verification failure.
|
||||
if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
|
||||
return false, nil
|
||||
return false, v
|
||||
}
|
||||
}
|
||||
|
||||
@ -420,12 +444,19 @@ func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bo
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// 429 Too Many Requests is recoverable. Sometimes the server puts
|
||||
// a Retry-After response header to indicate when the server is
|
||||
// available to start processing request from client.
|
||||
if resp.StatusCode == http.StatusTooManyRequests {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Check the response code. We retry on 500-range responses to allow
|
||||
// the server time to recover, as 500's are typically not permanent
|
||||
// errors and may relate to outages on the server side. This will catch
|
||||
// invalid response codes as well, like 0 and 999.
|
||||
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
|
||||
return true, nil
|
||||
return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
@ -434,7 +465,21 @@ func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bo
|
||||
// DefaultBackoff provides a default callback for Client.Backoff which
|
||||
// will perform exponential backoff based on the attempt number and limited
|
||||
// by the provided minimum and maximum durations.
|
||||
//
|
||||
// It also tries to parse Retry-After response header when a http.StatusTooManyRequests
|
||||
// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of
|
||||
// seconds the server states it may be ready to process more requests from this client.
|
||||
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable {
|
||||
if s, ok := resp.Header["Retry-After"]; ok {
|
||||
if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil {
|
||||
return time.Second * time.Duration(sleep)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mult := math.Pow(2, float64(attemptNum)) * float64(min)
|
||||
sleep := time.Duration(mult)
|
||||
if float64(sleep) != mult || sleep > max {
|
||||
@ -490,25 +535,31 @@ func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Respo
|
||||
|
||||
// Do wraps calling an HTTP method with retries.
|
||||
func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
if c.HTTPClient == nil {
|
||||
c.HTTPClient = cleanhttp.DefaultPooledClient()
|
||||
}
|
||||
c.clientInit.Do(func() {
|
||||
if c.HTTPClient == nil {
|
||||
c.HTTPClient = cleanhttp.DefaultPooledClient()
|
||||
}
|
||||
})
|
||||
|
||||
logger := c.logger()
|
||||
|
||||
if logger != nil {
|
||||
switch v := logger.(type) {
|
||||
case Logger:
|
||||
v.Printf("[DEBUG] %s %s", req.Method, req.URL)
|
||||
case LeveledLogger:
|
||||
v.Debug("performing request", "method", req.Method, "url", req.URL)
|
||||
case Logger:
|
||||
v.Printf("[DEBUG] %s %s", req.Method, req.URL)
|
||||
}
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
var attempt int
|
||||
var shouldRetry bool
|
||||
var doErr, checkErr error
|
||||
|
||||
for i := 0; ; i++ {
|
||||
attempt++
|
||||
|
||||
var code int // HTTP response code
|
||||
|
||||
// Always rewind the request body when non-nil.
|
||||
@ -527,30 +578,30 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
|
||||
if c.RequestLogHook != nil {
|
||||
switch v := logger.(type) {
|
||||
case Logger:
|
||||
c.RequestLogHook(v, req.Request, i)
|
||||
case LeveledLogger:
|
||||
c.RequestLogHook(hookLogger{v}, req.Request, i)
|
||||
case Logger:
|
||||
c.RequestLogHook(v, req.Request, i)
|
||||
default:
|
||||
c.RequestLogHook(nil, req.Request, i)
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt the request
|
||||
resp, err = c.HTTPClient.Do(req.Request)
|
||||
resp, doErr = c.HTTPClient.Do(req.Request)
|
||||
if resp != nil {
|
||||
code = resp.StatusCode
|
||||
}
|
||||
|
||||
// Check if we should continue with retries.
|
||||
checkOK, checkErr := c.CheckRetry(req.Context(), resp, err)
|
||||
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
|
||||
|
||||
if err != nil {
|
||||
if doErr != nil {
|
||||
switch v := logger.(type) {
|
||||
case Logger:
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
|
||||
case LeveledLogger:
|
||||
v.Error("request failed", "error", err, "method", req.Method, "url", req.URL)
|
||||
v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL)
|
||||
case Logger:
|
||||
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr)
|
||||
}
|
||||
} else {
|
||||
// Call this here to maintain the behavior of logging all requests,
|
||||
@ -558,23 +609,18 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
if c.ResponseLogHook != nil {
|
||||
// Call the response logger function if provided.
|
||||
switch v := logger.(type) {
|
||||
case Logger:
|
||||
c.ResponseLogHook(v, resp)
|
||||
case LeveledLogger:
|
||||
c.ResponseLogHook(hookLogger{v}, resp)
|
||||
case Logger:
|
||||
c.ResponseLogHook(v, resp)
|
||||
default:
|
||||
c.ResponseLogHook(nil, resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now decide if we should continue.
|
||||
if !checkOK {
|
||||
if checkErr != nil {
|
||||
err = checkErr
|
||||
}
|
||||
c.HTTPClient.CloseIdleConnections()
|
||||
return resp, err
|
||||
if !shouldRetry {
|
||||
break
|
||||
}
|
||||
|
||||
// We do this before drainBody because there's no need for the I/O if
|
||||
@ -585,7 +631,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
// We're going to retry, consume any response to reuse the connection.
|
||||
if err == nil && resp != nil {
|
||||
if doErr == nil {
|
||||
c.drainBody(resp.Body)
|
||||
}
|
||||
|
||||
@ -596,10 +642,10 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
}
|
||||
if logger != nil {
|
||||
switch v := logger.(type) {
|
||||
case Logger:
|
||||
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||
case LeveledLogger:
|
||||
v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
|
||||
case Logger:
|
||||
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||
}
|
||||
}
|
||||
select {
|
||||
@ -608,21 +654,44 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
return nil, req.Context().Err()
|
||||
case <-time.After(wait):
|
||||
}
|
||||
|
||||
// Make shallow copy of http Request so that we can modify its body
|
||||
// without racing against the closeBody call in persistConn.writeLoop.
|
||||
httpreq := *req.Request
|
||||
req.Request = &httpreq
|
||||
}
|
||||
|
||||
// this is the closest we have to success criteria
|
||||
if doErr == nil && checkErr == nil && !shouldRetry {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
defer c.HTTPClient.CloseIdleConnections()
|
||||
|
||||
err := doErr
|
||||
if checkErr != nil {
|
||||
err = checkErr
|
||||
}
|
||||
|
||||
if c.ErrorHandler != nil {
|
||||
c.HTTPClient.CloseIdleConnections()
|
||||
return c.ErrorHandler(resp, err, c.RetryMax+1)
|
||||
return c.ErrorHandler(resp, err, attempt)
|
||||
}
|
||||
|
||||
// By default, we close the response body and return an error without
|
||||
// returning the response
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
c.drainBody(resp.Body)
|
||||
}
|
||||
c.HTTPClient.CloseIdleConnections()
|
||||
return nil, fmt.Errorf("%s %s giving up after %d attempts",
|
||||
req.Method, req.URL, c.RetryMax+1)
|
||||
|
||||
// this means CheckRetry thought the request was a failure, but didn't
|
||||
// communicate why
|
||||
if err == nil {
|
||||
return nil, fmt.Errorf("%s %s giving up after %d attempt(s)",
|
||||
req.Method, req.URL, attempt)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w",
|
||||
req.Method, req.URL, attempt, err)
|
||||
}
|
||||
|
||||
// Try to read the response body so we can reuse this connection.
|
||||
@ -632,10 +701,10 @@ func (c *Client) drainBody(body io.ReadCloser) {
|
||||
if err != nil {
|
||||
if c.logger() != nil {
|
||||
switch v := c.logger().(type) {
|
||||
case Logger:
|
||||
v.Printf("[ERR] error reading response body: %v", err)
|
||||
case LeveledLogger:
|
||||
v.Error("error reading response body", "error", err)
|
||||
case Logger:
|
||||
v.Printf("[ERR] error reading response body: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
11
vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
generated
vendored
11
vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
generated
vendored
@ -1,7 +1,9 @@
|
||||
package retryablehttp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
)
|
||||
|
||||
@ -39,5 +41,12 @@ func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
// Execute the request.
|
||||
return rt.Client.Do(retryableReq)
|
||||
resp, err := rt.Client.Do(retryableReq)
|
||||
// If we got an error returned by standard library's `Do` method, unwrap it
|
||||
// otherwise we will wind up erroneously re-nesting the error.
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
return resp, errors.Unwrap(err)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
2
vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go
generated
vendored
2
vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go
generated
vendored
@ -493,7 +493,7 @@ func SafeParseIntSlice(in interface{}, elements int) ([]int, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result = make([]int, len(raw))
|
||||
var result = make([]int, 0, len(raw))
|
||||
for _, element := range raw {
|
||||
result = append(result, int(element))
|
||||
}
|
||||
|
50
vendor/github.com/hashicorp/vault/api/kv.go
generated
vendored
Normal file
50
vendor/github.com/hashicorp/vault/api/kv.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
package api
|
||||
|
||||
// A KVSecret is a key-value secret returned by Vault's KV secrets engine,
|
||||
// and is the most basic type of secret stored in Vault.
|
||||
//
|
||||
// Data contains the key-value pairs of the secret itself,
|
||||
// while Metadata contains a subset of metadata describing
|
||||
// this particular version of the secret.
|
||||
// The Metadata field for a KV v1 secret will always be nil, as
|
||||
// metadata is only supported starting in KV v2.
|
||||
//
|
||||
// The Raw field can be inspected for information about the lease,
|
||||
// and passed to a LifetimeWatcher object for periodic renewal.
|
||||
type KVSecret struct {
|
||||
Data map[string]interface{}
|
||||
VersionMetadata *KVVersionMetadata
|
||||
CustomMetadata map[string]interface{}
|
||||
Raw *Secret
|
||||
}
|
||||
|
||||
// KVv1 is used to return a client for reads and writes against
|
||||
// a KV v1 secrets engine in Vault.
|
||||
//
|
||||
// The mount path is the location where the target KV secrets engine resides
|
||||
// in Vault.
|
||||
//
|
||||
// While v1 is not necessarily deprecated, Vault development servers tend to
|
||||
// use v2 as the version of the KV secrets engine, as this is what's mounted
|
||||
// by default when a server is started in -dev mode. See the kvv2 struct.
|
||||
//
|
||||
// Learn more about the KV secrets engine here:
|
||||
// https://www.vaultproject.io/docs/secrets/kv
|
||||
func (c *Client) KVv1(mountPath string) *KVv1 {
|
||||
return &KVv1{c: c, mountPath: mountPath}
|
||||
}
|
||||
|
||||
// KVv2 is used to return a client for reads and writes against
|
||||
// a KV v2 secrets engine in Vault.
|
||||
//
|
||||
// The mount path is the location where the target KV secrets engine resides
|
||||
// in Vault.
|
||||
//
|
||||
// Vault development servers tend to have "secret" as the mount path,
|
||||
// as these are the default settings when a server is started in -dev mode.
|
||||
//
|
||||
// Learn more about the KV secrets engine here:
|
||||
// https://www.vaultproject.io/docs/secrets/kv
|
||||
func (c *Client) KVv2(mountPath string) *KVv2 {
|
||||
return &KVv2{c: c, mountPath: mountPath}
|
||||
}
|
57
vendor/github.com/hashicorp/vault/api/kv_v1.go
generated
vendored
Normal file
57
vendor/github.com/hashicorp/vault/api/kv_v1.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type KVv1 struct {
|
||||
c *Client
|
||||
mountPath string
|
||||
}
|
||||
|
||||
// Get returns a secret from the KV v1 secrets engine.
|
||||
func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
|
||||
pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath)
|
||||
|
||||
secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("no secret found at %s", pathToRead)
|
||||
}
|
||||
|
||||
return &KVSecret{
|
||||
Data: secret.Data,
|
||||
VersionMetadata: nil,
|
||||
Raw: secret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) into the
|
||||
// KV v1 secrets engine.
|
||||
//
|
||||
// If the secret already exists, it will be overwritten.
|
||||
func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error {
|
||||
pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath)
|
||||
|
||||
_, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes a secret from the KV v1 secrets engine.
|
||||
func (kv *KVv1) Delete(ctx context.Context, secretPath string) error {
|
||||
pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath)
|
||||
|
||||
_, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
788
vendor/github.com/hashicorp/vault/api/kv_v2.go
generated
vendored
Normal file
788
vendor/github.com/hashicorp/vault/api/kv_v2.go
generated
vendored
Normal file
@ -0,0 +1,788 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
type KVv2 struct {
|
||||
c *Client
|
||||
mountPath string
|
||||
}
|
||||
|
||||
// KVMetadata is the full metadata for a given KV v2 secret.
|
||||
type KVMetadata struct {
|
||||
CASRequired bool `mapstructure:"cas_required"`
|
||||
CreatedTime time.Time `mapstructure:"created_time"`
|
||||
CurrentVersion int `mapstructure:"current_version"`
|
||||
CustomMetadata map[string]interface{} `mapstructure:"custom_metadata"`
|
||||
DeleteVersionAfter time.Duration `mapstructure:"delete_version_after"`
|
||||
MaxVersions int `mapstructure:"max_versions"`
|
||||
OldestVersion int `mapstructure:"oldest_version"`
|
||||
UpdatedTime time.Time `mapstructure:"updated_time"`
|
||||
// Keys are stringified ints, e.g. "3". To get a sorted slice of version metadata, use GetVersionsAsList.
|
||||
Versions map[string]KVVersionMetadata `mapstructure:"versions"`
|
||||
Raw *Secret
|
||||
}
|
||||
|
||||
// KVMetadataPutInput is the subset of metadata that can be replaced for a
|
||||
// KV v2 secret using the PutMetadata method.
|
||||
//
|
||||
// All fields should be explicitly provided, as any fields left unset in the
|
||||
// struct will be reset to their zero value.
|
||||
type KVMetadataPutInput struct {
|
||||
CASRequired bool
|
||||
CustomMetadata map[string]interface{}
|
||||
DeleteVersionAfter time.Duration
|
||||
MaxVersions int
|
||||
}
|
||||
|
||||
// KVMetadataPatchInput is the subset of metadata that can be manually modified for
|
||||
// a KV v2 secret using the PatchMetadata method.
|
||||
//
|
||||
// The struct's fields are all pointers. A pointer to a field's zero
|
||||
// value (e.g. false for *bool) implies that field should be reset to its
|
||||
// zero value after update, whereas a field left as a nil pointer
|
||||
// (e.g. nil for *bool) implies the field should remain unchanged.
|
||||
//
|
||||
// Since maps are already pointers, use an empty map to remove all
|
||||
// custom metadata.
|
||||
type KVMetadataPatchInput struct {
|
||||
CASRequired *bool
|
||||
CustomMetadata map[string]interface{}
|
||||
DeleteVersionAfter *time.Duration
|
||||
MaxVersions *int
|
||||
}
|
||||
|
||||
// KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret.
|
||||
type KVVersionMetadata struct {
|
||||
Version int `mapstructure:"version"`
|
||||
CreatedTime time.Time `mapstructure:"created_time"`
|
||||
DeletionTime time.Time `mapstructure:"deletion_time"`
|
||||
Destroyed bool `mapstructure:"destroyed"`
|
||||
}
|
||||
|
||||
// Currently supported options: WithOption, WithCheckAndSet, WithMethod
|
||||
type KVOption func() (key string, value interface{})
|
||||
|
||||
const (
|
||||
KVOptionCheckAndSet = "cas"
|
||||
KVOptionMethod = "method"
|
||||
KVMergeMethodPatch = "patch"
|
||||
KVMergeMethodReadWrite = "rw"
|
||||
)
|
||||
|
||||
// WithOption can optionally be passed to provide generic options for a
|
||||
// KV request. Valid keys and values depend on the type of request.
|
||||
func WithOption(key string, value interface{}) KVOption {
|
||||
return func() (string, interface{}) {
|
||||
return key, value
|
||||
}
|
||||
}
|
||||
|
||||
// WithCheckAndSet can optionally be passed to perform a check-and-set
|
||||
// operation on a KV request. If not set, the write will be allowed.
|
||||
// If cas is set to 0, a write will only be allowed if the key doesn't exist.
|
||||
// If set to non-zero, the write will only be allowed if the key’s current
|
||||
// version matches the version specified in the cas parameter.
|
||||
func WithCheckAndSet(cas int) KVOption {
|
||||
return WithOption(KVOptionCheckAndSet, cas)
|
||||
}
|
||||
|
||||
// WithMergeMethod can optionally be passed to dictate which type of
|
||||
// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH
|
||||
// request will be issued. If set to "rw", then a read will be performed,
|
||||
// then a local update, followed by a remote update. Defaults to "patch".
|
||||
func WithMergeMethod(method string) KVOption {
|
||||
return WithOption(KVOptionMethod, method)
|
||||
}
|
||||
|
||||
// Get returns the latest version of a secret from the KV v2 secrets engine.
|
||||
//
|
||||
// If the latest version has been deleted, an error will not be thrown, but
|
||||
// the Data field on the returned secret will be nil, and the Metadata field
|
||||
// will contain the deletion time.
|
||||
func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
|
||||
pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
|
||||
|
||||
secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("no secret found at %s", pathToRead)
|
||||
}
|
||||
|
||||
kvSecret, err := extractDataAndVersionMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
|
||||
cm, err := extractCustomMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
kvSecret.CustomMetadata = cm
|
||||
|
||||
return kvSecret, nil
|
||||
}
|
||||
|
||||
// GetVersion returns the data and metadata for a specific version of the
|
||||
// given secret.
|
||||
//
|
||||
// If that version has been deleted, the Data field on the
|
||||
// returned secret will be nil, and the Metadata field will contain the deletion time.
|
||||
//
|
||||
// GetVersionsAsList can provide a list of available versions sorted by
|
||||
// version number, while the response from GetMetadata contains them as a map.
|
||||
func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) {
|
||||
pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
|
||||
|
||||
queryParams := map[string][]string{"version": {strconv.Itoa(version)}}
|
||||
secret, err := kv.c.Logical().ReadWithDataWithContext(ctx, pathToRead, queryParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("no secret with version %d found at %s", version, pathToRead)
|
||||
}
|
||||
|
||||
kvSecret, err := extractDataAndVersionMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
|
||||
cm, err := extractCustomMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err)
|
||||
}
|
||||
kvSecret.CustomMetadata = cm
|
||||
|
||||
return kvSecret, nil
|
||||
}
|
||||
|
||||
// GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number.
|
||||
func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) {
|
||||
pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
|
||||
|
||||
secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil || secret.Data == nil {
|
||||
return nil, fmt.Errorf("no secret metadata found at %s", pathToRead)
|
||||
}
|
||||
|
||||
md, err := extractFullMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to extract metadata from secret to determine versions: %w", err)
|
||||
}
|
||||
|
||||
versionsList := make([]KVVersionMetadata, 0, len(md.Versions))
|
||||
for _, versionMetadata := range md.Versions {
|
||||
versionsList = append(versionsList, versionMetadata)
|
||||
}
|
||||
|
||||
sort.Slice(versionsList, func(i, j int) bool { return versionsList[i].Version < versionsList[j].Version })
|
||||
return versionsList, nil
|
||||
}
|
||||
|
||||
// GetMetadata returns the full metadata for a given secret, including a map of
|
||||
// its existing versions and their respective creation/deletion times, etc.
|
||||
func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) {
|
||||
pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
|
||||
|
||||
secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil || secret.Data == nil {
|
||||
return nil, fmt.Errorf("no secret metadata found at %s", pathToRead)
|
||||
}
|
||||
|
||||
md, err := extractFullMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to extract metadata from secret: %w", err)
|
||||
}
|
||||
|
||||
return md, nil
|
||||
}
|
||||
|
||||
// Put inserts a key-value secret (e.g. {"password": "Hashi123"})
|
||||
// into the KV v2 secrets engine.
|
||||
//
|
||||
// If the secret already exists, a new version will be created
|
||||
// and the previous version can be accessed with the GetVersion method.
|
||||
// GetMetadata can provide a list of available versions.
|
||||
func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) {
|
||||
pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
|
||||
|
||||
wrappedData := map[string]interface{}{
|
||||
"data": data,
|
||||
}
|
||||
|
||||
// Add options such as check-and-set, etc.
|
||||
// We leave this as an optional arg so that most users
|
||||
// can just pass plain key-value secret data without
|
||||
// having to remember to put the extra layer "data" in there.
|
||||
options := make(map[string]interface{})
|
||||
for _, opt := range opts {
|
||||
k, v := opt()
|
||||
options[k] = v
|
||||
}
|
||||
if len(opts) > 0 {
|
||||
wrappedData["options"] = options
|
||||
}
|
||||
|
||||
secret, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, wrappedData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err)
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("no secret was written to %s", pathToWriteTo)
|
||||
}
|
||||
|
||||
metadata, err := extractVersionMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err)
|
||||
}
|
||||
|
||||
kvSecret := &KVSecret{
|
||||
Data: nil, // secret.Data in this case is the metadata
|
||||
VersionMetadata: metadata,
|
||||
Raw: secret,
|
||||
}
|
||||
|
||||
cm, err := extractCustomMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToWriteTo, err)
|
||||
}
|
||||
kvSecret.CustomMetadata = cm
|
||||
|
||||
return kvSecret, nil
|
||||
}
|
||||
|
||||
// PutMetadata can be used to fully replace a subset of metadata fields for a
|
||||
// given KV v2 secret. All fields will replace the corresponding values on the Vault server.
|
||||
// Any fields left as nil will reset the field on the Vault server back to its zero value.
|
||||
//
|
||||
// To only partially replace the values of these metadata fields, use PatchMetadata.
|
||||
//
|
||||
// This method can also be used to create a new secret with just metadata and no secret data yet.
|
||||
func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error {
|
||||
pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
|
||||
|
||||
const (
|
||||
casRequiredKey = "cas_required"
|
||||
deleteVersionAfterKey = "delete_version_after"
|
||||
maxVersionsKey = "max_versions"
|
||||
customMetadataKey = "custom_metadata"
|
||||
)
|
||||
|
||||
// convert values to a map we can pass to Logical
|
||||
metadataMap := make(map[string]interface{})
|
||||
metadataMap[maxVersionsKey] = metadata.MaxVersions
|
||||
metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String()
|
||||
metadataMap[casRequiredKey] = metadata.CASRequired
|
||||
metadataMap[customMetadataKey] = metadata.CustomMetadata
|
||||
|
||||
_, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Patch additively updates the most recent version of a key-value secret,
|
||||
// differentiating it from Put which will fully overwrite the previous data.
|
||||
// Only the key-value pairs that are new or changing need to be provided.
|
||||
//
|
||||
// The WithMethod KVOption function can optionally be passed to dictate which
|
||||
// kind of patch to perform, as older Vault server versions (pre-1.9.0) may
|
||||
// only be able to use the old "rw" (read-then-write) style of partial update,
|
||||
// whereas newer Vault servers can use the default value of "patch" if the
|
||||
// client token's policy has the "patch" capability.
|
||||
func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) {
|
||||
// determine patch method
|
||||
var patchMethod string
|
||||
var ok bool
|
||||
for _, opt := range opts {
|
||||
k, v := opt()
|
||||
if k == "method" {
|
||||
patchMethod, ok = v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine which kind of patch to use,
|
||||
// the newer HTTP Patch style or the older read-then-write style
|
||||
var kvs *KVSecret
|
||||
var perr error
|
||||
switch patchMethod {
|
||||
case "rw":
|
||||
kvs, perr = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData)
|
||||
case "patch":
|
||||
kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
|
||||
case "":
|
||||
kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"")
|
||||
}
|
||||
if perr != nil {
|
||||
return nil, fmt.Errorf("unable to perform patch: %w", perr)
|
||||
}
|
||||
if kvs == nil {
|
||||
return nil, fmt.Errorf("no secret was written to %s", secretPath)
|
||||
}
|
||||
|
||||
return kvs, nil
|
||||
}
|
||||
|
||||
// PatchMetadata can be used to replace just a subset of a secret's
|
||||
// metadata fields at a time, as opposed to PutMetadata which is used to
|
||||
// completely replace all fields on the previous metadata.
|
||||
func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error {
|
||||
pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
|
||||
|
||||
md, err := toMetadataMap(metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create map for JSON merge patch request: %w", err)
|
||||
}
|
||||
|
||||
_, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the most recent version of a secret from the KV v2
|
||||
// secrets engine. To delete an older version, use DeleteVersions.
|
||||
func (kv *KVv2) Delete(ctx context.Context, secretPath string) error {
|
||||
pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
|
||||
|
||||
_, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteVersions deletes the specified versions of a secret from the KV v2
|
||||
// secrets engine. To delete the latest version of a secret, just use Delete.
|
||||
func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error {
|
||||
// verb and path are different when trying to delete past versions
|
||||
pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath)
|
||||
|
||||
if len(versions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var versionsToDelete []string
|
||||
for _, version := range versions {
|
||||
versionsToDelete = append(versionsToDelete, strconv.Itoa(version))
|
||||
}
|
||||
versionsMap := map[string]interface{}{
|
||||
"versions": versionsToDelete,
|
||||
}
|
||||
_, err := kv.c.Logical().WriteWithContext(ctx, pathToDelete, versionsMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteMetadata deletes all versions and metadata of the secret at the
|
||||
// given path.
|
||||
func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error {
|
||||
pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
|
||||
|
||||
_, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Undelete undeletes the given versions of a secret, restoring the data
|
||||
// so that it can be fetched again with Get requests.
|
||||
//
|
||||
// A list of existing versions can be retrieved using the GetVersionsAsList method.
|
||||
func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error {
|
||||
pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath)
|
||||
|
||||
data := map[string]interface{}{
|
||||
"versions": versions,
|
||||
}
|
||||
|
||||
_, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error undeleting secret metadata at %s: %w", pathToUndelete, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy permanently removes the specified secret versions' data
|
||||
// from the Vault server. If no secret exists at the given path, no
|
||||
// action will be taken.
|
||||
//
|
||||
// A list of existing versions can be retrieved using the GetVersionsAsList method.
|
||||
func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error {
|
||||
pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath)
|
||||
|
||||
data := map[string]interface{}{
|
||||
"versions": versions,
|
||||
}
|
||||
|
||||
_, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error destroying secret metadata at %s: %w", pathToDestroy, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rollback can be used to roll a secret back to a previous
|
||||
// non-deleted/non-destroyed version. That previous version becomes the
|
||||
// next/newest version for the path.
|
||||
func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) {
|
||||
// First, do a read to get the current version for check-and-set
|
||||
latest, err := kv.Get(ctx, secretPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get latest version of secret: %w", err)
|
||||
}
|
||||
|
||||
// Make sure a value already exists
|
||||
if latest == nil {
|
||||
return nil, fmt.Errorf("no secret was found: %w", err)
|
||||
}
|
||||
|
||||
// Verify metadata found
|
||||
if latest.VersionMetadata == nil {
|
||||
return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data")
|
||||
}
|
||||
|
||||
// Now run it again and read the version we want to roll back to
|
||||
rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get previous version %d of secret: %s", toVersion, err)
|
||||
}
|
||||
|
||||
err = validateRollbackVersion(rollbackVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err)
|
||||
}
|
||||
|
||||
casVersion := latest.VersionMetadata.Version
|
||||
kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err)
|
||||
}
|
||||
|
||||
return kvs, nil
|
||||
}
|
||||
|
||||
func extractCustomMetadata(secret *Secret) (map[string]interface{}, error) {
|
||||
// Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
|
||||
customMetadataInterface, ok := secret.Data["custom_metadata"]
|
||||
if !ok {
|
||||
metadataInterface, ok := secret.Data["metadata"]
|
||||
if !ok { // if that's not found, bail since it should have had one or the other
|
||||
return nil, fmt.Errorf("secret is missing expected fields")
|
||||
}
|
||||
metadataMap, ok := metadataInterface.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface)
|
||||
}
|
||||
customMetadataInterface, ok = metadataMap["custom_metadata"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\": %v", metadataMap)
|
||||
}
|
||||
}
|
||||
|
||||
cm, ok := customMetadataInterface.(map[string]interface{})
|
||||
if !ok && customMetadataInterface != nil {
|
||||
return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", customMetadataInterface, customMetadataInterface)
|
||||
}
|
||||
|
||||
return cm, nil
|
||||
}
|
||||
|
||||
func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) {
|
||||
// A nil map is a valid value for data: secret.Data will be nil when this
|
||||
// version of the secret has been deleted, but the metadata is still
|
||||
// available.
|
||||
var data map[string]interface{}
|
||||
if secret.Data != nil {
|
||||
dataInterface, ok := secret.Data["data"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing expected 'data' element")
|
||||
}
|
||||
|
||||
if dataInterface != nil {
|
||||
data, ok = dataInterface.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type for 'data' element: %T (%#v)", data, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metadata, err := extractVersionMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get version metadata: %w", err)
|
||||
}
|
||||
|
||||
return &KVSecret{
|
||||
Data: data,
|
||||
VersionMetadata: metadata,
|
||||
Raw: secret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// extractVersionMetadata decodes the per-version metadata out of a raw API
// response into a KVVersionMetadata. Returns (nil, nil) when the response
// carries no data at all.
func extractVersionMetadata(secret *Secret) (*KVVersionMetadata, error) {
	var metadata *KVVersionMetadata

	// No data means no metadata to extract — not an error.
	if secret.Data == nil {
		return nil, nil
	}

	// Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
	var metadataMap map[string]interface{}
	metadataInterface, ok := secret.Data["metadata"]
	if ok {
		metadataMap, ok = metadataInterface.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface)
		}
	} else {
		// Write-style response: the top-level Data map IS the metadata.
		metadataMap = secret.Data
	}

	// deletion_time usually comes in as an empty string which can't be
	// processed as time.RFC3339, so we reset it to a convertible value
	if metadataMap["deletion_time"] == "" {
		metadataMap["deletion_time"] = time.Time{}
	}

	// Decode the loosely-typed map into the struct, converting RFC3339
	// timestamp strings into time.Time along the way.
	d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339),
		Result:     &metadata,
	})
	if err != nil {
		return nil, fmt.Errorf("error setting up decoder for API response: %w", err)
	}

	err = d.Decode(metadataMap)
	if err != nil {
		return nil, fmt.Errorf("error decoding metadata from API response into VersionMetadata: %w", err)
	}

	return metadata, nil
}
|
||||
|
||||
func extractFullMetadata(secret *Secret) (*KVMetadata, error) {
|
||||
var metadata *KVMetadata
|
||||
|
||||
if secret.Data == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if versions, ok := secret.Data["versions"]; ok {
|
||||
versionsMap := versions.(map[string]interface{})
|
||||
if len(versionsMap) > 0 {
|
||||
for version, metadata := range versionsMap {
|
||||
metadataMap := metadata.(map[string]interface{})
|
||||
// deletion_time usually comes in as an empty string which can't be
|
||||
// processed as time.RFC3339, so we reset it to a convertible value
|
||||
if metadataMap["deletion_time"] == "" {
|
||||
metadataMap["deletion_time"] = time.Time{}
|
||||
}
|
||||
versionInt, err := strconv.Atoi(version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error converting version %s to integer: %w", version, err)
|
||||
}
|
||||
metadataMap["version"] = versionInt
|
||||
versionsMap[version] = metadataMap // save the updated copy of the metadata map
|
||||
}
|
||||
}
|
||||
secret.Data["versions"] = versionsMap // save the updated copy of the versions map
|
||||
}
|
||||
|
||||
d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.ComposeDecodeHookFunc(
|
||||
mapstructure.StringToTimeHookFunc(time.RFC3339),
|
||||
mapstructure.StringToTimeDurationHookFunc(),
|
||||
),
|
||||
Result: &metadata,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error setting up decoder for API response: %w", err)
|
||||
}
|
||||
|
||||
err = d.Decode(secret.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding metadata from API response into KVMetadata: %w", err)
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func validateRollbackVersion(rollbackVersion *KVSecret) error {
|
||||
// Make sure a value already exists
|
||||
if rollbackVersion == nil || rollbackVersion.Data == nil {
|
||||
return fmt.Errorf("no secret found")
|
||||
}
|
||||
|
||||
// Verify metadata found
|
||||
if rollbackVersion.VersionMetadata == nil {
|
||||
return fmt.Errorf("no version metadata found; rollback only works on existing data")
|
||||
}
|
||||
|
||||
// Verify it hasn't been deleted
|
||||
if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() {
|
||||
return fmt.Errorf("cannot roll back to a version that has been deleted")
|
||||
}
|
||||
|
||||
if rollbackVersion.VersionMetadata.Destroyed {
|
||||
return fmt.Errorf("cannot roll back to a version that has been destroyed")
|
||||
}
|
||||
|
||||
// Verify old data found
|
||||
if rollbackVersion.Data == nil {
|
||||
return fmt.Errorf("no data found; rollback only works on existing data")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) {
|
||||
pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath)
|
||||
|
||||
// take any other additional options provided
|
||||
// and pass them along to the patch request
|
||||
wrappedData := map[string]interface{}{
|
||||
"data": newData,
|
||||
}
|
||||
options := make(map[string]interface{})
|
||||
for _, opt := range opts {
|
||||
k, v := opt()
|
||||
options[k] = v
|
||||
}
|
||||
if len(opts) > 0 {
|
||||
wrappedData["options"] = options
|
||||
}
|
||||
|
||||
secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData)
|
||||
if err != nil {
|
||||
// If it's a 405, that probably means the server is running a pre-1.9
|
||||
// Vault version that doesn't support the HTTP PATCH method.
|
||||
// Fall back to the old way of doing it.
|
||||
if re, ok := err.(*ResponseError); ok && re.StatusCode == 405 {
|
||||
return readThenWrite(ctx, client, mountPath, secretPath, newData)
|
||||
}
|
||||
|
||||
if re, ok := err.(*ResponseError); ok && re.StatusCode == 403 {
|
||||
return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("error performing merge patch to %s: %s", pathToMergePatch, err)
|
||||
}
|
||||
|
||||
metadata, err := extractVersionMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err)
|
||||
}
|
||||
|
||||
kvSecret := &KVSecret{
|
||||
Data: nil, // secret.Data in this case is the metadata
|
||||
VersionMetadata: metadata,
|
||||
Raw: secret,
|
||||
}
|
||||
|
||||
cm, err := extractCustomMetadata(secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading custom metadata for secret %s: %w", secretPath, err)
|
||||
}
|
||||
kvSecret.CustomMetadata = cm
|
||||
|
||||
return kvSecret, nil
|
||||
}
|
||||
|
||||
func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) {
|
||||
// First, read the secret.
|
||||
existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err)
|
||||
}
|
||||
|
||||
// Make sure the secret already exists
|
||||
if existingVersion == nil || existingVersion.Data == nil {
|
||||
return nil, fmt.Errorf("no existing secret was found at %s when doing read-then-write patch operation: %w", secretPath, err)
|
||||
}
|
||||
|
||||
// Verify existing secret has metadata
|
||||
if existingVersion.VersionMetadata == nil {
|
||||
return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath)
|
||||
}
|
||||
|
||||
// Copy new data over with existing data
|
||||
combinedData := existingVersion.Data
|
||||
for k, v := range newData {
|
||||
combinedData[k] = v
|
||||
}
|
||||
|
||||
updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err)
|
||||
}
|
||||
|
||||
return updatedSecret, nil
|
||||
}
|
||||
|
||||
func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) {
|
||||
metadataMap := make(map[string]interface{})
|
||||
|
||||
const (
|
||||
casRequiredKey = "cas_required"
|
||||
deleteVersionAfterKey = "delete_version_after"
|
||||
maxVersionsKey = "max_versions"
|
||||
customMetadataKey = "custom_metadata"
|
||||
)
|
||||
|
||||
// The KVMetadataPatchInput struct is designed to have pointer fields so that
|
||||
// the user can easily express the difference between explicitly setting a
|
||||
// field back to its zero value (e.g. false), as opposed to just having
|
||||
// the field remain unchanged (e.g. nil). This way, they only need to pass
|
||||
// the fields they want to change.
|
||||
if patchInput.MaxVersions != nil {
|
||||
metadataMap[maxVersionsKey] = *(patchInput.MaxVersions)
|
||||
}
|
||||
if patchInput.CASRequired != nil {
|
||||
metadataMap[casRequiredKey] = *(patchInput.CASRequired)
|
||||
}
|
||||
if patchInput.CustomMetadata != nil {
|
||||
if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys
|
||||
metadataMap[customMetadataKey] = nil
|
||||
} else {
|
||||
metadataMap[customMetadataKey] = patchInput.CustomMetadata
|
||||
}
|
||||
}
|
||||
if patchInput.DeleteVersionAfter != nil {
|
||||
metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String()
|
||||
}
|
||||
|
||||
return metadataMap, nil
|
||||
}
|
22
vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go
generated
vendored
22
vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go
generated
vendored
@ -446,18 +446,30 @@ func ParsePublicKeyPEM(data []byte) (interface{}, error) {
|
||||
return nil, errors.New("data does not contain any valid public keys")
|
||||
}
|
||||
|
||||
// addPolicyIdentifiers adds certificate policies extension
|
||||
//
|
||||
// AddPolicyIdentifiers adds certificate policies extension, based on CreationBundle
|
||||
func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) {
|
||||
for _, oidstr := range data.Params.PolicyIdentifiers {
|
||||
oid, err := StringToOid(oidstr)
|
||||
oidOnly := true
|
||||
for _, oidStr := range data.Params.PolicyIdentifiers {
|
||||
oid, err := StringToOid(oidStr)
|
||||
if err == nil {
|
||||
certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid)
|
||||
}
|
||||
if err != nil {
|
||||
oidOnly = false
|
||||
}
|
||||
}
|
||||
if !oidOnly { // Because all policy information is held in the same extension, when we use an extra extension to
|
||||
// add policy qualifier information, that overwrites any information in the PolicyIdentifiers field on the Cert
|
||||
// Template, so we need to reparse all the policy identifiers here
|
||||
extension, err := CreatePolicyInformationExtensionFromStorageStrings(data.Params.PolicyIdentifiers)
|
||||
if err == nil {
|
||||
// If this errors out, don't add it, rely on the OIDs parsed into PolicyIdentifiers above
|
||||
certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, *extension)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// addExtKeyUsageOids adds custom extended key usage OIDs to certificate
|
||||
// AddExtKeyUsageOids adds custom extended key usage OIDs to certificate
|
||||
func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) {
|
||||
for _, oidstr := range data.Params.ExtKeyUsageOIDs {
|
||||
oid, err := StringToOid(oidstr)
|
||||
|
114
vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
generated
vendored
114
vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
generated
vendored
@ -17,7 +17,10 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
@ -894,3 +897,114 @@ func (p *KeyBundle) ToPrivateKeyPemString() (string, error) {
|
||||
|
||||
return "", errutil.InternalError{Err: "No Private Key Bytes to Wrap"}
|
||||
}
|
||||
|
||||
// PolicyIdentifierWithQualifierEntry Structure for Internal Storage
|
||||
type PolicyIdentifierWithQualifierEntry struct {
|
||||
PolicyIdentifierOid string `json:"oid",mapstructure:"oid"`
|
||||
CPS string `json:"cps,omitempty",mapstructure:"cps"`
|
||||
Notice string `json:"notice,omitempty",mapstructure:"notice"`
|
||||
}
|
||||
|
||||
// GetPolicyIdentifierFromString parses out the internal structure of a Policy Identifier
|
||||
func GetPolicyIdentifierFromString(policyIdentifier string) (*PolicyIdentifierWithQualifierEntry, error) {
|
||||
if policyIdentifier == "" {
|
||||
return nil, nil
|
||||
}
|
||||
entry := &PolicyIdentifierWithQualifierEntry{}
|
||||
// Either a OID, or a JSON Entry: First check OID:
|
||||
_, err := StringToOid(policyIdentifier)
|
||||
if err == nil {
|
||||
entry.PolicyIdentifierOid = policyIdentifier
|
||||
return entry, nil
|
||||
}
|
||||
// Now Check If JSON Entry
|
||||
jsonErr := json.Unmarshal([]byte(policyIdentifier), &entry)
|
||||
if jsonErr != nil { // Neither, if we got here
|
||||
return entry, errors.New(fmt.Sprintf("Policy Identifier %q is neither a valid OID: %s, Nor JSON Policy Identifier: %s", policyIdentifier, err.Error(), jsonErr.Error()))
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// Policy Identifier with Qualifier Structure for ASN Marshalling:

// policyInformationOid is the certificatePolicies extension OID (2.5.29.32).
var policyInformationOid = asn1.ObjectIdentifier{2, 5, 29, 32}

// policyInformation mirrors the ASN.1 PolicyInformation structure: a policy
// OID plus an optional list of qualifiers (CPS and/or user notice).
type policyInformation struct {
	PolicyIdentifier asn1.ObjectIdentifier
	Qualifiers       []interface{} `asn1:"tag:optional,omitempty"`
}

// cpsPolicyQualifierID is the CPS qualifier OID (1.3.6.1.5.5.7.2.1).
var cpsPolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}

// cpsUrlPolicyQualifier carries a CPS URI qualifier; the URI is marshalled
// as an IA5String per the asn1 tag.
type cpsUrlPolicyQualifier struct {
	PolicyQualifierID asn1.ObjectIdentifier
	Qualifier         string `asn1:"tag:optional,ia5"`
}

// userNoticePolicyQualifierID is the user-notice qualifier OID (1.3.6.1.5.5.7.2.2).
var userNoticePolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}

// userNoticePolicyQualifier carries a user-notice qualifier.
type userNoticePolicyQualifier struct {
	PolicyQualifierID asn1.ObjectIdentifier
	Qualifier         userNotice
}

// userNotice holds the display text of a user notice, marshalled as UTF8
// per the asn1 tag.
type userNotice struct {
	ExplicitText string `asn1:"tag:optional,utf8"`
}
|
||||
|
||||
// createPolicyIdentifierWithQualifier converts a parsed storage entry into
// the ASN.1 policyInformation structure, attaching CPS and/or user-notice
// qualifiers when the entry carries them. Returns an error only when the
// entry's OID string does not parse.
func createPolicyIdentifierWithQualifier(entry PolicyIdentifierWithQualifierEntry) (*policyInformation, error) {
	// Each Policy is Identified by a Unique ID, as designated here:
	policyOid, err := StringToOid(entry.PolicyIdentifierOid)
	if err != nil {
		return nil, err
	}
	pi := policyInformation{
		PolicyIdentifier: policyOid,
	}
	// A non-empty CPS URI becomes a CPS qualifier.
	if entry.CPS != "" {
		qualifier := cpsUrlPolicyQualifier{
			PolicyQualifierID: cpsPolicyQualifierID,
			Qualifier:         entry.CPS,
		}
		pi.Qualifiers = append(pi.Qualifiers, qualifier)
	}
	// A non-empty notice becomes a user-notice qualifier.
	if entry.Notice != "" {
		qualifier := userNoticePolicyQualifier{
			PolicyQualifierID: userNoticePolicyQualifierID,
			Qualifier: userNotice{
				ExplicitText: entry.Notice,
			},
		}
		pi.Qualifiers = append(pi.Qualifiers, qualifier)
	}
	return &pi, nil
}
|
||||
|
||||
// CreatePolicyInformationExtensionFromStorageStrings parses the stored policyIdentifiers, which might be JSON Policy
// Identifier with Qualifier Entries or String OIDs, and returns an extension if everything parsed correctly, and an
// error if constructing
func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []string) (*pkix.Extension, error) {
	var policyInformationList []policyInformation
	for _, policyIdentifierStr := range policyIdentifiers {
		// Each entry is either a bare OID or a JSON qualifier object.
		policyIdentifierEntry, err := GetPolicyIdentifierFromString(policyIdentifierStr)
		if err != nil {
			return nil, err
		}
		if policyIdentifierEntry != nil { // Okay to skip empty entries if there is no error
			policyInformationStruct, err := createPolicyIdentifierWithQualifier(*policyIdentifierEntry)
			if err != nil {
				return nil, err
			}
			policyInformationList = append(policyInformationList, *policyInformationStruct)
		}
	}
	// Marshal the whole list into one certificatePolicies extension value.
	asn1Bytes, err := asn1.Marshal(policyInformationList)
	if err != nil {
		return nil, err
	}
	return &pkix.Extension{
		Id:       policyInformationOid,
		Critical: false,
		Value:    asn1Bytes,
	}, nil
}
|
||||
|
2
vendor/github.com/hashicorp/vault/sdk/version/version_base.go
generated
vendored
2
vendor/github.com/hashicorp/vault/sdk/version/version_base.go
generated
vendored
@ -11,7 +11,7 @@ var (
|
||||
// Whether cgo is enabled or not; set at build time
|
||||
CgoEnabled bool
|
||||
|
||||
Version = "1.11.0"
|
||||
Version = "1.12.0"
|
||||
VersionPrerelease = "dev1"
|
||||
VersionMetadata = ""
|
||||
)
|
||||
|
24
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
24
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
@ -1,6 +1,7 @@
|
||||
package assert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
@ -32,7 +33,8 @@ var (
|
||||
|
||||
stringType = reflect.TypeOf("")
|
||||
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
bytesType = reflect.TypeOf([]byte{})
|
||||
)
|
||||
|
||||
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
@ -323,6 +325,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
|
||||
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
|
||||
}
|
||||
case reflect.Slice:
|
||||
{
|
||||
// We only care about the []byte type.
|
||||
if !canConvert(obj1Value, bytesType) {
|
||||
break
|
||||
}
|
||||
|
||||
// []byte can be compared!
|
||||
bytesObj1, ok := obj1.([]byte)
|
||||
if !ok {
|
||||
bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
|
||||
|
||||
}
|
||||
bytesObj2, ok := obj2.([]byte)
|
||||
if !ok {
|
||||
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
|
||||
}
|
||||
|
||||
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
|
||||
}
|
||||
}
|
||||
|
||||
return compareEqual, false
|
||||
|
10
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
10
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
@ -736,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
|
||||
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// YAMLEqf asserts that two YAML strings are equivalent.
|
||||
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
|
20
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
20
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
@ -1461,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
|
||||
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
|
||||
}
|
||||
|
||||
// WithinRange asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
|
||||
func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinRange(a.t, actual, start, end, msgAndArgs...)
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinRangef(a.t, actual, start, end, msg, args...)
|
||||
}
|
||||
|
||||
// YAMLEq asserts that two YAML strings are equivalent.
|
||||
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
|
78
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
78
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
@ -144,7 +145,8 @@ func CallerInfo() []string {
|
||||
if len(parts) > 1 {
|
||||
dir := parts[len(parts)-2]
|
||||
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
|
||||
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
|
||||
path, _ := filepath.Abs(file)
|
||||
callers = append(callers, fmt.Sprintf("%s:%d", path, line))
|
||||
}
|
||||
}
|
||||
|
||||
@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool {
|
||||
|
||||
switch objValue.Kind() {
|
||||
// collection types are empty when they have no element
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
|
||||
case reflect.Chan, reflect.Map, reflect.Slice:
|
||||
return objValue.Len() == 0
|
||||
// pointers are empty if nil or if the value they point to is empty
|
||||
// pointers are empty if nil or if the value they point to is empty
|
||||
case reflect.Ptr:
|
||||
if objValue.IsNil() {
|
||||
return true
|
||||
}
|
||||
deref := objValue.Elem().Interface()
|
||||
return isEmpty(deref)
|
||||
// for all other types, compare against the zero value
|
||||
// for all other types, compare against the zero value
|
||||
// array types are empty when they match their zero-initialized state
|
||||
default:
|
||||
zero := reflect.Zero(objValue.Type())
|
||||
return reflect.DeepEqual(object, zero.Interface())
|
||||
@ -815,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
|
||||
return true // we consider nil to be equal to the nil set
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ok = false
|
||||
@ -825,14 +827,32 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
|
||||
listKind := reflect.TypeOf(list).Kind()
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
|
||||
if listKind != reflect.Array && listKind != reflect.Slice {
|
||||
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice {
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
if subsetKind == reflect.Map && listKind == reflect.Map {
|
||||
listValue := reflect.ValueOf(list)
|
||||
subsetKeys := subsetValue.MapKeys()
|
||||
|
||||
for i := 0; i < len(subsetKeys); i++ {
|
||||
subsetKey := subsetKeys[i]
|
||||
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
|
||||
listElement := listValue.MapIndex(subsetKey).Interface()
|
||||
|
||||
if !ObjectsAreEqual(subsetElement, listElement) {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
for i := 0; i < subsetValue.Len(); i++ {
|
||||
element := subsetValue.Index(i).Interface()
|
||||
ok, found := containsElement(list, element)
|
||||
@ -859,7 +879,6 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
||||
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ok = false
|
||||
@ -869,14 +888,32 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
||||
listKind := reflect.TypeOf(list).Kind()
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
|
||||
if listKind != reflect.Array && listKind != reflect.Slice {
|
||||
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice {
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
if subsetKind == reflect.Map && listKind == reflect.Map {
|
||||
listValue := reflect.ValueOf(list)
|
||||
subsetKeys := subsetValue.MapKeys()
|
||||
|
||||
for i := 0; i < len(subsetKeys); i++ {
|
||||
subsetKey := subsetKeys[i]
|
||||
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
|
||||
listElement := listValue.MapIndex(subsetKey).Interface()
|
||||
|
||||
if !ObjectsAreEqual(subsetElement, listElement) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
|
||||
}
|
||||
|
||||
for i := 0; i < subsetValue.Len(); i++ {
|
||||
element := subsetValue.Index(i).Interface()
|
||||
ok, found := containsElement(list, element)
|
||||
@ -1109,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration,
|
||||
return true
|
||||
}
|
||||
|
||||
// WithinRange asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
|
||||
func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
if end.Before(start) {
|
||||
return Fail(t, "Start should be before end", msgAndArgs...)
|
||||
}
|
||||
|
||||
if actual.Before(start) {
|
||||
return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...)
|
||||
} else if actual.After(end) {
|
||||
return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func toFloat(x interface{}) (float64, bool) {
|
||||
var xf float64
|
||||
xok := true
|
||||
|
26
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
26
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
@ -1864,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// WithinRange asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
|
||||
func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.WithinRange(t, actual, start, end, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.WithinRangef(t, actual, start, end, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// YAMLEq asserts that two YAML strings are equivalent.
|
||||
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
|
20
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
20
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
@ -1462,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
|
||||
WithinDurationf(a.t, expected, actual, delta, msg, args...)
|
||||
}
|
||||
|
||||
// WithinRange asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
|
||||
func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
WithinRange(a.t, actual, start, end, msgAndArgs...)
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
WithinRangef(a.t, actual, start, end, msg, args...)
|
||||
}
|
||||
|
||||
// YAMLEq asserts that two YAML strings are equivalent.
|
||||
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
|
1
vendor/k8s.io/klog/v2/README.md
generated
vendored
1
vendor/k8s.io/klog/v2/README.md
generated
vendored
@ -28,7 +28,6 @@ Historical context is available here:
|
||||
Semantic versioning is used in this repository. It contains several Go modules
|
||||
with different levels of stability:
|
||||
- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags
|
||||
- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags
|
||||
- `examples` - no stable API, no tags, no intention to ever stabilize
|
||||
|
||||
Exempt from the API stability guarantee are items (packages, functions, etc.)
|
||||
|
38
vendor/k8s.io/klog/v2/contextual.go
generated
vendored
38
vendor/k8s.io/klog/v2/contextual.go
generated
vendored
@ -34,18 +34,6 @@ import (
|
||||
// mutex locking.
|
||||
|
||||
var (
|
||||
// contextualLoggingEnabled controls whether contextual logging is
|
||||
// active. Disabling it may have some small performance benefit.
|
||||
contextualLoggingEnabled = true
|
||||
|
||||
// globalLogger is the global Logger chosen by users of klog, nil if
|
||||
// none is available.
|
||||
globalLogger *Logger
|
||||
|
||||
// globalLoggerOptions contains the options that were supplied for
|
||||
// globalLogger.
|
||||
globalLoggerOptions loggerOptions
|
||||
|
||||
// klogLogger is used as fallback for logging through the normal klog code
|
||||
// when no Logger is set.
|
||||
klogLogger logr.Logger = logr.New(&klogger{})
|
||||
@ -81,10 +69,10 @@ func SetLogger(logger logr.Logger) {
|
||||
// routing log entries through klogr into klog and then into the actual Logger
|
||||
// backend.
|
||||
func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
|
||||
globalLogger = &logger
|
||||
globalLoggerOptions = loggerOptions{}
|
||||
logging.logger = &logger
|
||||
logging.loggerOptions = loggerOptions{}
|
||||
for _, opt := range opts {
|
||||
opt(&globalLoggerOptions)
|
||||
opt(&logging.loggerOptions)
|
||||
}
|
||||
}
|
||||
|
||||
@ -119,8 +107,8 @@ type loggerOptions struct {
|
||||
// Modifying the logger is not thread-safe and should be done while no other
|
||||
// goroutines invoke log calls, usually during program initialization.
|
||||
func ClearLogger() {
|
||||
globalLogger = nil
|
||||
globalLoggerOptions = loggerOptions{}
|
||||
logging.logger = nil
|
||||
logging.loggerOptions = loggerOptions{}
|
||||
}
|
||||
|
||||
// EnableContextualLogging controls whether contextual logging is enabled.
|
||||
@ -132,14 +120,14 @@ func ClearLogger() {
|
||||
//
|
||||
// This must be called during initialization before goroutines are started.
|
||||
func EnableContextualLogging(enabled bool) {
|
||||
contextualLoggingEnabled = enabled
|
||||
logging.contextualLoggingEnabled = enabled
|
||||
}
|
||||
|
||||
// FromContext retrieves a logger set by the caller or, if not set,
|
||||
// falls back to the program's global logger (a Logger instance or klog
|
||||
// itself).
|
||||
func FromContext(ctx context.Context) Logger {
|
||||
if contextualLoggingEnabled {
|
||||
if logging.contextualLoggingEnabled {
|
||||
if logger, err := logr.FromContext(ctx); err == nil {
|
||||
return logger
|
||||
}
|
||||
@ -160,10 +148,10 @@ func TODO() Logger {
|
||||
// better receive a logger via its parameters. TODO can be used as a temporary
|
||||
// solution for such code.
|
||||
func Background() Logger {
|
||||
if globalLoggerOptions.contextualLogger {
|
||||
// Is non-nil because globalLoggerOptions.contextualLogger is
|
||||
if logging.loggerOptions.contextualLogger {
|
||||
// Is non-nil because logging.loggerOptions.contextualLogger is
|
||||
// only true if a logger was set.
|
||||
return *globalLogger
|
||||
return *logging.logger
|
||||
}
|
||||
|
||||
return klogLogger
|
||||
@ -172,7 +160,7 @@ func Background() Logger {
|
||||
// LoggerWithValues returns logger.WithValues(...kv) when
|
||||
// contextual logging is enabled, otherwise the logger.
|
||||
func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
|
||||
if contextualLoggingEnabled {
|
||||
if logging.contextualLoggingEnabled {
|
||||
return logger.WithValues(kv...)
|
||||
}
|
||||
return logger
|
||||
@ -181,7 +169,7 @@ func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
|
||||
// LoggerWithName returns logger.WithName(name) when contextual logging is
|
||||
// enabled, otherwise the logger.
|
||||
func LoggerWithName(logger Logger, name string) Logger {
|
||||
if contextualLoggingEnabled {
|
||||
if logging.contextualLoggingEnabled {
|
||||
return logger.WithName(name)
|
||||
}
|
||||
return logger
|
||||
@ -190,7 +178,7 @@ func LoggerWithName(logger Logger, name string) Logger {
|
||||
// NewContext returns logr.NewContext(ctx, logger) when
|
||||
// contextual logging is enabled, otherwise ctx.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
if contextualLoggingEnabled {
|
||||
if logging.contextualLoggingEnabled {
|
||||
return logr.NewContext(ctx, logger)
|
||||
}
|
||||
return ctx
|
||||
|
42
vendor/k8s.io/klog/v2/internal/dbg/dbg.go
generated
vendored
Normal file
42
vendor/k8s.io/klog/v2/internal/dbg/dbg.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
||||
//
|
||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package dbg provides some helper code for call traces.
|
||||
package dbg
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Stacks is a wrapper for runtime.Stack that attempts to recover the data for
|
||||
// all goroutines or the calling one.
|
||||
func Stacks(all bool) []byte {
|
||||
// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
|
||||
n := 10000
|
||||
if all {
|
||||
n = 100000
|
||||
}
|
||||
var trace []byte
|
||||
for i := 0; i < 5; i++ {
|
||||
trace = make([]byte, n)
|
||||
nbytes := runtime.Stack(trace, all)
|
||||
if nbytes < len(trace) {
|
||||
return trace[:nbytes]
|
||||
}
|
||||
n *= 2
|
||||
}
|
||||
return trace
|
||||
}
|
124
vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
generated
vendored
124
vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
generated
vendored
@ -20,6 +20,8 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// WithValues implements LogSink.WithValues. The old key/value pairs are
|
||||
@ -44,53 +46,49 @@ func WithValues(oldKV, newKV []interface{}) []interface{} {
|
||||
return kv
|
||||
}
|
||||
|
||||
// TrimDuplicates deduplicates elements provided in multiple key/value tuple
|
||||
// slices, whilst maintaining the distinction between where the items are
|
||||
// contained.
|
||||
func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} {
|
||||
// maintain a map of all seen keys
|
||||
seenKeys := map[interface{}]struct{}{}
|
||||
// build the same number of output slices as inputs
|
||||
outs := make([][]interface{}, len(kvLists))
|
||||
// iterate over the input slices backwards, as 'later' kv specifications
|
||||
// of the same key will take precedence over earlier ones
|
||||
for i := len(kvLists) - 1; i >= 0; i-- {
|
||||
// initialise this output slice
|
||||
outs[i] = []interface{}{}
|
||||
// obtain a reference to the kvList we are processing
|
||||
// and make sure it has an even number of entries
|
||||
kvList := kvLists[i]
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, missingValue)
|
||||
}
|
||||
|
||||
// start iterating at len(kvList) - 2 (i.e. the 2nd last item) for
|
||||
// slices that have an even number of elements.
|
||||
// We add (len(kvList) % 2) here to handle the case where there is an
|
||||
// odd number of elements in a kvList.
|
||||
// If there is an odd number, then the last element in the slice will
|
||||
// have the value 'null'.
|
||||
for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 {
|
||||
k := kvList[i2]
|
||||
// if we have already seen this key, do not include it again
|
||||
if _, ok := seenKeys[k]; ok {
|
||||
continue
|
||||
}
|
||||
// make a note that we've observed a new key
|
||||
seenKeys[k] = struct{}{}
|
||||
// attempt to obtain the value of the key
|
||||
var v interface{}
|
||||
// i2+1 should only ever be out of bounds if we handling the first
|
||||
// iteration over a slice with an odd number of elements
|
||||
if i2+1 < len(kvList) {
|
||||
v = kvList[i2+1]
|
||||
}
|
||||
// add this KV tuple to the *start* of the output list to maintain
|
||||
// the original order as we are iterating over the slice backwards
|
||||
outs[i] = append([]interface{}{k, v}, outs[i]...)
|
||||
}
|
||||
// MergeKVs deduplicates elements provided in two key/value slices.
|
||||
//
|
||||
// Keys in each slice are expected to be unique, so duplicates can only occur
|
||||
// when the first and second slice contain the same key. When that happens, the
|
||||
// key/value pair from the second slice is used. The first slice must be well-formed
|
||||
// (= even key/value pairs). The second one may have a missing value, in which
|
||||
// case the special "missing value" is added to the result.
|
||||
func MergeKVs(first, second []interface{}) []interface{} {
|
||||
maxLength := len(first) + (len(second)+1)/2*2
|
||||
if maxLength == 0 {
|
||||
// Nothing to do at all.
|
||||
return nil
|
||||
}
|
||||
return outs
|
||||
|
||||
if len(first) == 0 && len(second)%2 == 0 {
|
||||
// Nothing to be overridden, second slice is well-formed
|
||||
// and can be used directly.
|
||||
return second
|
||||
}
|
||||
|
||||
// Determine which keys are in the second slice so that we can skip
|
||||
// them when iterating over the first one. The code intentionally
|
||||
// favors performance over completeness: we assume that keys are string
|
||||
// constants and thus compare equal when the string values are equal. A
|
||||
// string constant being overridden by, for example, a fmt.Stringer is
|
||||
// not handled.
|
||||
overrides := map[interface{}]bool{}
|
||||
for i := 0; i < len(second); i += 2 {
|
||||
overrides[second[i]] = true
|
||||
}
|
||||
merged := make([]interface{}, 0, maxLength)
|
||||
for i := 0; i+1 < len(first); i += 2 {
|
||||
key := first[i]
|
||||
if overrides[key] {
|
||||
continue
|
||||
}
|
||||
merged = append(merged, key, first[i+1])
|
||||
}
|
||||
merged = append(merged, second...)
|
||||
if len(merged)%2 != 0 {
|
||||
merged = append(merged, missingValue)
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
const missingValue = "(MISSING)"
|
||||
@ -111,10 +109,10 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if k, ok := k.(string); ok {
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(k)
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
@ -131,6 +129,24 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", v))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
@ -163,6 +179,18 @@ func StringerToString(s fmt.Stringer) (ret string) {
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalerToValue invokes a marshaler and catches
|
||||
// panics.
|
||||
func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", err)
|
||||
}
|
||||
}()
|
||||
ret = m.MarshalLog()
|
||||
return
|
||||
}
|
||||
|
||||
// ErrorToString converts an error to a string,
|
||||
// handling panics if they occur.
|
||||
func ErrorToString(err error) (ret string) {
|
||||
|
64
vendor/k8s.io/klog/v2/k8s_references.go
generated
vendored
64
vendor/k8s.io/klog/v2/k8s_references.go
generated
vendored
@ -77,6 +77,8 @@ func KRef(namespace, name string) ObjectRef {
|
||||
}
|
||||
|
||||
// KObjs returns slice of ObjectRef from an slice of ObjectMeta
|
||||
//
|
||||
// DEPRECATED: Use KObjSlice instead, it has better performance.
|
||||
func KObjs(arg interface{}) []ObjectRef {
|
||||
s := reflect.ValueOf(arg)
|
||||
if s.Kind() != reflect.Slice {
|
||||
@ -92,3 +94,65 @@ func KObjs(arg interface{}) []ObjectRef {
|
||||
}
|
||||
return objectRefs
|
||||
}
|
||||
|
||||
// KObjSlice takes a slice of objects that implement the KMetadata interface
|
||||
// and returns an object that gets logged as a slice of ObjectRef values or a
|
||||
// string containing those values, depending on whether the logger prefers text
|
||||
// output or structured output.
|
||||
//
|
||||
// An error string is logged when KObjSlice is not passed a suitable slice.
|
||||
//
|
||||
// Processing of the argument is delayed until the value actually gets logged,
|
||||
// in contrast to KObjs where that overhead is incurred regardless of whether
|
||||
// the result is needed.
|
||||
func KObjSlice(arg interface{}) interface{} {
|
||||
return kobjSlice{arg: arg}
|
||||
}
|
||||
|
||||
type kobjSlice struct {
|
||||
arg interface{}
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = kobjSlice{}
|
||||
var _ logr.Marshaler = kobjSlice{}
|
||||
|
||||
func (ks kobjSlice) String() string {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return fmt.Sprintf("%v", objectRefs)
|
||||
}
|
||||
|
||||
func (ks kobjSlice) MarshalLog() interface{} {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return objectRefs
|
||||
}
|
||||
|
||||
func (ks kobjSlice) process() ([]interface{}, error) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as nil.
|
||||
return nil, nil
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
default:
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
|
||||
}
|
||||
objectRefs := make([]interface{}, 0, s.Len())
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
item := s.Index(i).Interface()
|
||||
if item == nil {
|
||||
objectRefs = append(objectRefs, nil)
|
||||
} else if v, ok := item.(KMetadata); ok {
|
||||
objectRefs = append(objectRefs, KObj(v))
|
||||
} else {
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
}
|
||||
}
|
||||
return objectRefs, nil
|
||||
}
|
||||
|
300
vendor/k8s.io/klog/v2/klog.go
generated
vendored
300
vendor/k8s.io/klog/v2/klog.go
generated
vendored
@ -41,6 +41,10 @@
|
||||
//
|
||||
// -logtostderr=true
|
||||
// Logs are written to standard error instead of to files.
|
||||
// This shortcuts most of the usual output routing:
|
||||
// -alsologtostderr, -stderrthreshold and -log_dir have no
|
||||
// effect and output redirection at runtime with SetOutput is
|
||||
// ignored.
|
||||
// -alsologtostderr=false
|
||||
// Logs are written to standard error as well as to files.
|
||||
// -stderrthreshold=ERROR
|
||||
@ -92,6 +96,7 @@ import (
|
||||
|
||||
"k8s.io/klog/v2/internal/buffer"
|
||||
"k8s.io/klog/v2/internal/clock"
|
||||
"k8s.io/klog/v2/internal/dbg"
|
||||
"k8s.io/klog/v2/internal/serialize"
|
||||
"k8s.io/klog/v2/internal/severity"
|
||||
)
|
||||
@ -242,6 +247,10 @@ func (m *moduleSpec) String() string {
|
||||
// Lock because the type is not atomic. TODO: clean this up.
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
return m.serialize()
|
||||
}
|
||||
|
||||
func (m *moduleSpec) serialize() string {
|
||||
var b bytes.Buffer
|
||||
for i, f := range m.filter {
|
||||
if i > 0 {
|
||||
@ -263,6 +272,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of
|
||||
// Set will sets module value
|
||||
// Syntax: -vmodule=recordio=2,file=1,gfs*=3
|
||||
func (m *moduleSpec) Set(value string) error {
|
||||
filter, err := parseModuleSpec(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
logging.setVState(logging.verbosity, filter, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseModuleSpec(value string) ([]modulePat, error) {
|
||||
var filter []modulePat
|
||||
for _, pat := range strings.Split(value, ",") {
|
||||
if len(pat) == 0 {
|
||||
@ -271,15 +291,15 @@ func (m *moduleSpec) Set(value string) error {
|
||||
}
|
||||
patLev := strings.Split(pat, "=")
|
||||
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
|
||||
return errVmoduleSyntax
|
||||
return nil, errVmoduleSyntax
|
||||
}
|
||||
pattern := patLev[0]
|
||||
v, err := strconv.ParseInt(patLev[1], 10, 32)
|
||||
if err != nil {
|
||||
return errors.New("syntax error: expect comma-separated list of filename=N")
|
||||
return nil, errors.New("syntax error: expect comma-separated list of filename=N")
|
||||
}
|
||||
if v < 0 {
|
||||
return errors.New("negative value for vmodule level")
|
||||
return nil, errors.New("negative value for vmodule level")
|
||||
}
|
||||
if v == 0 {
|
||||
continue // Ignore. It's harmless but no point in paying the overhead.
|
||||
@ -287,10 +307,7 @@ func (m *moduleSpec) Set(value string) error {
|
||||
// TODO: check syntax of filter?
|
||||
filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
|
||||
}
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
logging.setVState(logging.verbosity, filter, true)
|
||||
return nil
|
||||
return filter, nil
|
||||
}
|
||||
|
||||
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
|
||||
@ -404,19 +421,19 @@ func InitFlags(flagset *flag.FlagSet) {
|
||||
flagset = flag.CommandLine
|
||||
}
|
||||
|
||||
flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory")
|
||||
flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file")
|
||||
flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
|
||||
flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file (no effect when -logtostderr=true)")
|
||||
flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
|
||||
"Defines the maximum size a log file can grow to. Unit is megabytes. "+
|
||||
"Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
|
||||
"If the value is 0, the maximum file size is unlimited.")
|
||||
flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
|
||||
flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
|
||||
flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files (no effect when -logtostderr=true)")
|
||||
flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
|
||||
flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages")
|
||||
flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
|
||||
flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)")
|
||||
flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
|
||||
flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
|
||||
flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
|
||||
flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
|
||||
flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
|
||||
flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
|
||||
flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
|
||||
}
|
||||
@ -426,8 +443,20 @@ func Flush() {
|
||||
logging.lockAndFlushAll()
|
||||
}
|
||||
|
||||
// loggingT collects all the global state of the logging setup.
|
||||
type loggingT struct {
|
||||
// settings collects global settings.
|
||||
type settings struct {
|
||||
// contextualLoggingEnabled controls whether contextual logging is
|
||||
// active. Disabling it may have some small performance benefit.
|
||||
contextualLoggingEnabled bool
|
||||
|
||||
// logger is the global Logger chosen by users of klog, nil if
|
||||
// none is available.
|
||||
logger *Logger
|
||||
|
||||
// loggerOptions contains the options that were supplied for
|
||||
// globalLogger.
|
||||
loggerOptions loggerOptions
|
||||
|
||||
// Boolean flags. Not handled atomically because the flag.Value interface
|
||||
// does not let us avoid the =true, and that shorthand is necessary for
|
||||
// compatibility. TODO: does this matter enough to fix? Seems unlikely.
|
||||
@ -437,26 +466,14 @@ type loggingT struct {
|
||||
// Level flag. Handled atomically.
|
||||
stderrThreshold severityValue // The -stderrthreshold flag.
|
||||
|
||||
// bufferCache maintains the free list. It uses its own mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
bufferCache buffer.Buffers
|
||||
// Access to all of the following fields must be protected via a mutex.
|
||||
|
||||
// mu protects the remaining elements of this structure and is
|
||||
// used to synchronize logging.
|
||||
mu sync.Mutex
|
||||
// file holds writer for each of the log types.
|
||||
file [severity.NumSeverity]flushSyncWriter
|
||||
// flushD holds a flushDaemon that frequently flushes log file buffers.
|
||||
flushD *flushDaemon
|
||||
// flushInterval is the interval for periodic flushing. If zero,
|
||||
// the global default will be used.
|
||||
flushInterval time.Duration
|
||||
// pcs is used in V to avoid an allocation when computing the caller's PC.
|
||||
pcs [1]uintptr
|
||||
// vmap is a cache of the V Level for each V() call site, identified by PC.
|
||||
// It is wiped whenever the vmodule flag changes state.
|
||||
vmap map[uintptr]Level
|
||||
|
||||
// filterLength stores the length of the vmodule filter chain. If greater
|
||||
// than zero, it means vmodule is enabled. It may be read safely
|
||||
// using sync.LoadInt32, but is only modified under mu.
|
||||
@ -496,7 +513,48 @@ type loggingT struct {
|
||||
filter LogFilter
|
||||
}
|
||||
|
||||
var logging loggingT
|
||||
// deepCopy creates a copy that doesn't share anything with the original
|
||||
// instance.
|
||||
func (s settings) deepCopy() settings {
|
||||
// vmodule is a slice and would be shared, so we have copy it.
|
||||
filter := make([]modulePat, len(s.vmodule.filter))
|
||||
for i := range s.vmodule.filter {
|
||||
filter[i] = s.vmodule.filter[i]
|
||||
}
|
||||
s.vmodule.filter = filter
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// loggingT collects all the global state of the logging setup.
|
||||
type loggingT struct {
|
||||
settings
|
||||
|
||||
// bufferCache maintains the free list. It uses its own mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
bufferCache buffer.Buffers
|
||||
|
||||
// flushD holds a flushDaemon that frequently flushes log file buffers.
|
||||
// Uses its own mutex.
|
||||
flushD *flushDaemon
|
||||
|
||||
// mu protects the remaining elements of this structure and the fields
|
||||
// in settingsT which need a mutex lock.
|
||||
mu sync.Mutex
|
||||
|
||||
// pcs is used in V to avoid an allocation when computing the caller's PC.
|
||||
pcs [1]uintptr
|
||||
// vmap is a cache of the V Level for each V() call site, identified by PC.
|
||||
// It is wiped whenever the vmodule flag changes state.
|
||||
vmap map[uintptr]Level
|
||||
}
|
||||
|
||||
var logging = loggingT{
|
||||
settings: settings{
|
||||
contextualLoggingEnabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
// setVState sets a consistent state for V logging.
|
||||
// l.mu is held.
|
||||
@ -520,6 +578,55 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool
|
||||
|
||||
var timeNow = time.Now // Stubbed out for testing.
|
||||
|
||||
// CaptureState gathers information about all current klog settings.
|
||||
// The result can be used to restore those settings.
|
||||
func CaptureState() State {
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
return &state{
|
||||
settings: logging.settings.deepCopy(),
|
||||
flushDRunning: logging.flushD.isRunning(),
|
||||
maxSize: MaxSize,
|
||||
}
|
||||
}
|
||||
|
||||
// State stores a snapshot of klog settings. It gets created with CaptureState
|
||||
// and can be used to restore the entire state. Modifying individual settings
|
||||
// is supported via the command line flags.
|
||||
type State interface {
|
||||
// Restore restore the entire state. It may get called more than once.
|
||||
Restore()
|
||||
}
|
||||
|
||||
type state struct {
|
||||
settings
|
||||
|
||||
flushDRunning bool
|
||||
maxSize uint64
|
||||
}
|
||||
|
||||
func (s *state) Restore() {
|
||||
// This needs to be done before mutex locking.
|
||||
if s.flushDRunning && !logging.flushD.isRunning() {
|
||||
// This is not quite accurate: StartFlushDaemon might
|
||||
// have been called with some different interval.
|
||||
interval := s.flushInterval
|
||||
if interval == 0 {
|
||||
interval = flushInterval
|
||||
}
|
||||
logging.flushD.run(interval)
|
||||
} else if !s.flushDRunning && logging.flushD.isRunning() {
|
||||
logging.flushD.stop()
|
||||
}
|
||||
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
|
||||
logging.settings = s.settings
|
||||
logging.setVState(s.verbosity, s.vmodule.filter, true)
|
||||
MaxSize = s.maxSize
|
||||
}
|
||||
|
||||
/*
|
||||
header formats a log header as defined by the C++ implementation.
|
||||
It returns a buffer containing the formatted header and the user's file and line number.
|
||||
@ -688,7 +795,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
|
||||
serialize.KVListFormat(&b.Buffer, "err", err)
|
||||
}
|
||||
serialize.KVListFormat(&b.Buffer, keysAndValues...)
|
||||
l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer)
|
||||
l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
|
||||
// Make the buffer available for reuse.
|
||||
l.bufferCache.PutBuffer(b)
|
||||
}
|
||||
@ -757,7 +864,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
|
||||
|
||||
if l.traceLocation.isSet() {
|
||||
if l.traceLocation.match(file, line) {
|
||||
buf.Write(stacks(false))
|
||||
buf.Write(dbg.Stacks(false))
|
||||
}
|
||||
}
|
||||
data := buf.Bytes()
|
||||
@ -765,7 +872,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
|
||||
// TODO: set 'severity' and caller information as structured log info
|
||||
// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
|
||||
if s == severity.ErrorLog {
|
||||
globalLogger.WithCallDepth(depth+3).Error(nil, string(data))
|
||||
logging.logger.WithCallDepth(depth+3).Error(nil, string(data))
|
||||
} else {
|
||||
log.WithCallDepth(depth + 3).Info(string(data))
|
||||
}
|
||||
@ -822,12 +929,15 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
|
||||
OsExit(1)
|
||||
}
|
||||
// Dump all goroutine stacks before exiting.
|
||||
trace := stacks(true)
|
||||
// Write the stack trace for all goroutines to the stderr.
|
||||
if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr {
|
||||
os.Stderr.Write(trace)
|
||||
// First, make sure we see the trace for the current goroutine on standard error.
|
||||
// If -logtostderr has been specified, the loop below will do that anyway
|
||||
// as the first stack in the full dump.
|
||||
if !l.toStderr {
|
||||
os.Stderr.Write(dbg.Stacks(false))
|
||||
}
|
||||
|
||||
// Write the stack trace for all goroutines to the files.
|
||||
trace := dbg.Stacks(true)
|
||||
logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
|
||||
for log := severity.FatalLog; log >= severity.InfoLog; log-- {
|
||||
if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
|
||||
@ -847,25 +957,6 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
|
||||
}
|
||||
}
|
||||
|
||||
// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
|
||||
func stacks(all bool) []byte {
|
||||
// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
|
||||
n := 10000
|
||||
if all {
|
||||
n = 100000
|
||||
}
|
||||
var trace []byte
|
||||
for i := 0; i < 5; i++ {
|
||||
trace = make([]byte, n)
|
||||
nbytes := runtime.Stack(trace, all)
|
||||
if nbytes < len(trace) {
|
||||
return trace[:nbytes]
|
||||
}
|
||||
n *= 2
|
||||
}
|
||||
return trace
|
||||
}
|
||||
|
||||
// logExitFunc provides a simple mechanism to override the default behavior
|
||||
// of exiting on error. Used in testing and to guarantee we reach a required exit
|
||||
// for fatal logs. Instead, exit could be a function rather than a method but that
|
||||
@ -1077,9 +1168,9 @@ func (f *flushDaemon) isRunning() bool {
|
||||
return f.stopC != nil
|
||||
}
|
||||
|
||||
// StopFlushDaemon stops the flush daemon, if running.
|
||||
// StopFlushDaemon stops the flush daemon, if running, and flushes once.
|
||||
// This prevents klog from leaking goroutines on shutdown. After stopping
|
||||
// the daemon, you can still manually flush buffers by calling Flush().
|
||||
// the daemon, you can still manually flush buffers again by calling Flush().
|
||||
func StopFlushDaemon() {
|
||||
logging.flushD.stop()
|
||||
}
|
||||
@ -1109,8 +1200,8 @@ func (l *loggingT) flushAll() {
|
||||
file.Sync() // ignore error
|
||||
}
|
||||
}
|
||||
if globalLoggerOptions.flush != nil {
|
||||
globalLoggerOptions.flush()
|
||||
if logging.loggerOptions.flush != nil {
|
||||
logging.loggerOptions.flush()
|
||||
}
|
||||
}
|
||||
|
||||
@ -1158,7 +1249,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) {
|
||||
}
|
||||
// printWithFileLine with alsoToStderr=true, so standard log messages
|
||||
// always appear on standard error.
|
||||
logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text)
|
||||
logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text)
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
@ -1196,10 +1287,10 @@ type Verbose struct {
|
||||
}
|
||||
|
||||
func newVerbose(level Level, b bool) Verbose {
|
||||
if globalLogger == nil {
|
||||
if logging.logger == nil {
|
||||
return Verbose{b, nil}
|
||||
}
|
||||
v := globalLogger.V(int(level))
|
||||
v := logging.logger.V(int(level))
|
||||
return Verbose{b, &v}
|
||||
}
|
||||
|
||||
@ -1318,7 +1409,7 @@ func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
|
||||
// InfoSDepth acts as InfoS but uses depth to determine which call frame to log.
|
||||
// InfoSDepth(0, "msg") is the same as InfoS("msg").
|
||||
func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
|
||||
logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...)
|
||||
logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v.
|
||||
@ -1347,37 +1438,37 @@ func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
|
||||
// Info logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Info(args ...interface{}) {
|
||||
logging.print(severity.InfoLog, globalLogger, logging.filter, args...)
|
||||
logging.print(severity.InfoLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// InfoDepth acts as Info but uses depth to determine which call frame to log.
|
||||
// InfoDepth(0, "msg") is the same as Info("msg").
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
|
||||
func Infoln(args ...interface{}) {
|
||||
logging.println(severity.InfoLog, globalLogger, logging.filter, args...)
|
||||
logging.println(severity.InfoLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// InfolnDepth acts as Infoln but uses depth to determine which call frame to log.
|
||||
// InfolnDepth(0, "msg") is the same as Infoln("msg").
|
||||
func InfolnDepth(depth int, args ...interface{}) {
|
||||
logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...)
|
||||
logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...)
|
||||
}
|
||||
|
||||
// InfofDepth acts as Infof but uses depth to determine which call frame to log.
|
||||
// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...).
|
||||
func InfofDepth(depth int, format string, args ...interface{}) {
|
||||
logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
|
||||
// InfoS structured logs to the INFO log.
|
||||
@ -1389,79 +1480,79 @@ func InfofDepth(depth int, format string, args ...interface{}) {
|
||||
// output:
|
||||
// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
|
||||
func InfoS(msg string, keysAndValues ...interface{}) {
|
||||
logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...)
|
||||
logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Warning(args ...interface{}) {
|
||||
logging.print(severity.WarningLog, globalLogger, logging.filter, args...)
|
||||
logging.print(severity.WarningLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// WarningDepth acts as Warning but uses depth to determine which call frame to log.
|
||||
// WarningDepth(0, "msg") is the same as Warning("msg").
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
|
||||
func Warningln(args ...interface{}) {
|
||||
logging.println(severity.WarningLog, globalLogger, logging.filter, args...)
|
||||
logging.println(severity.WarningLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log.
|
||||
// WarninglnDepth(0, "msg") is the same as Warningln("msg").
|
||||
func WarninglnDepth(depth int, args ...interface{}) {
|
||||
logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...)
|
||||
logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...)
|
||||
}
|
||||
|
||||
// WarningfDepth acts as Warningf but uses depth to determine which call frame to log.
|
||||
// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...).
|
||||
func WarningfDepth(depth int, format string, args ...interface{}) {
|
||||
logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Error(args ...interface{}) {
|
||||
logging.print(severity.ErrorLog, globalLogger, logging.filter, args...)
|
||||
logging.print(severity.ErrorLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// ErrorDepth acts as Error but uses depth to determine which call frame to log.
|
||||
// ErrorDepth(0, "msg") is the same as Error("msg").
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
|
||||
func Errorln(args ...interface{}) {
|
||||
logging.println(severity.ErrorLog, globalLogger, logging.filter, args...)
|
||||
logging.println(severity.ErrorLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log.
|
||||
// ErrorlnDepth(0, "msg") is the same as Errorln("msg").
|
||||
func ErrorlnDepth(depth int, args ...interface{}) {
|
||||
logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...)
|
||||
logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...)
|
||||
}
|
||||
|
||||
// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log.
|
||||
// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...).
|
||||
func ErrorfDepth(depth int, format string, args ...interface{}) {
|
||||
logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
|
||||
// ErrorS structured logs to the ERROR, WARNING, and INFO logs.
|
||||
@ -1474,52 +1565,63 @@ func ErrorfDepth(depth int, format string, args ...interface{}) {
|
||||
// output:
|
||||
// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout"
|
||||
func ErrorS(err error, msg string, keysAndValues ...interface{}) {
|
||||
logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...)
|
||||
logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log.
|
||||
// ErrorSDepth(0, "msg") is the same as ErrorS("msg").
|
||||
func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
|
||||
logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...)
|
||||
logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls OsExit(255).
|
||||
// prints stack trace(s), then calls OsExit(255).
|
||||
//
|
||||
// Stderr only receives a dump of the current goroutine's stack trace. Log files,
|
||||
// if there are any, receive a dump of the stack traces in all goroutines.
|
||||
//
|
||||
// Callers who want more control over handling of fatal events may instead use a
|
||||
// combination of different functions:
|
||||
// - some info or error logging function, optionally with a stack trace
|
||||
// value generated by github.com/go-logr/lib/dbg.Backtrace
|
||||
// - Flush to flush pending log data
|
||||
// - panic, os.Exit or returning to the caller with an error
|
||||
//
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Fatal(args ...interface{}) {
|
||||
logging.print(severity.FatalLog, globalLogger, logging.filter, args...)
|
||||
logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
|
||||
// FatalDepth(0, "msg") is the same as Fatal("msg").
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls OsExit(255).
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
|
||||
func Fatalln(args ...interface{}) {
|
||||
logging.println(severity.FatalLog, globalLogger, logging.filter, args...)
|
||||
logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log.
|
||||
// FatallnDepth(0, "msg") is the same as Fatalln("msg").
|
||||
func FatallnDepth(depth int, args ...interface{}) {
|
||||
logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls OsExit(255).
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...)
|
||||
logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
|
||||
}
|
||||
|
||||
// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log.
|
||||
// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...).
|
||||
func FatalfDepth(depth int, format string, args ...interface{}) {
|
||||
logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
|
||||
// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
|
||||
@ -1530,41 +1632,41 @@ var fatalNoStacks uint32
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Exit(args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.print(severity.FatalLog, globalLogger, logging.filter, args...)
|
||||
logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// ExitDepth acts as Exit but uses depth to determine which call frame to log.
|
||||
// ExitDepth(0, "msg") is the same as Exit("msg").
|
||||
func ExitDepth(depth int, args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
|
||||
func Exitln(args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.println(severity.FatalLog, globalLogger, logging.filter, args...)
|
||||
logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
|
||||
}
|
||||
|
||||
// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log.
|
||||
// ExitlnDepth(0, "msg") is the same as Exitln("msg").
|
||||
func ExitlnDepth(depth int, args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
|
||||
}
|
||||
|
||||
// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Exitf(format string, args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...)
|
||||
logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
|
||||
}
|
||||
|
||||
// ExitfDepth acts as Exitf but uses depth to determine which call frame to log.
|
||||
// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...).
|
||||
func ExitfDepth(depth int, format string, args ...interface{}) {
|
||||
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||
logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
|
||||
// LogFilter is a collection of functions that can filter all logging calls,
|
||||
|
8
vendor/k8s.io/klog/v2/klogr.go
generated
vendored
8
vendor/k8s.io/klog/v2/klogr.go
generated
vendored
@ -43,11 +43,11 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
|
||||
}
|
||||
|
||||
func (l klogger) Info(level int, msg string, kvList ...interface{}) {
|
||||
trimmed := serialize.TrimDuplicates(l.values, kvList)
|
||||
merged := serialize.MergeKVs(l.values, kvList)
|
||||
if l.prefix != "" {
|
||||
msg = l.prefix + ": " + msg
|
||||
}
|
||||
V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...)
|
||||
V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
|
||||
}
|
||||
|
||||
func (l klogger) Enabled(level int) bool {
|
||||
@ -55,11 +55,11 @@ func (l klogger) Enabled(level int) bool {
|
||||
}
|
||||
|
||||
func (l klogger) Error(err error, msg string, kvList ...interface{}) {
|
||||
trimmed := serialize.TrimDuplicates(l.values, kvList)
|
||||
merged := serialize.MergeKVs(l.values, kvList)
|
||||
if l.prefix != "" {
|
||||
msg = l.prefix + ": " + msg
|
||||
}
|
||||
ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...)
|
||||
ErrorSDepth(l.callDepth+1, err, msg, merged...)
|
||||
}
|
||||
|
||||
// WithName returns a new logr.Logger with the specified name appended. klogr
|
||||
|
35
vendor/modules.txt
vendored
35
vendor/modules.txt
vendored
@ -1,5 +1,5 @@
|
||||
# github.com/IBM/keyprotect-go-client v0.7.0
|
||||
## explicit; go 1.12
|
||||
# github.com/IBM/keyprotect-go-client v0.8.0
|
||||
## explicit; go 1.15
|
||||
github.com/IBM/keyprotect-go-client
|
||||
github.com/IBM/keyprotect-go-client/iam
|
||||
# github.com/PuerkitoBio/purell v1.1.1
|
||||
@ -59,7 +59,7 @@ github.com/aws/aws-sdk-go/service/sso
|
||||
github.com/aws/aws-sdk-go/service/sso/ssoiface
|
||||
github.com/aws/aws-sdk-go/service/sts
|
||||
github.com/aws/aws-sdk-go/service/sts/stsiface
|
||||
# github.com/aws/aws-sdk-go-v2 v1.16.5
|
||||
# github.com/aws/aws-sdk-go-v2 v1.16.7
|
||||
## explicit; go 1.15
|
||||
github.com/aws/aws-sdk-go-v2/aws
|
||||
github.com/aws/aws-sdk-go-v2/aws/defaults
|
||||
@ -76,21 +76,21 @@ github.com/aws/aws-sdk-go-v2/internal/sdk
|
||||
github.com/aws/aws-sdk-go-v2/internal/strings
|
||||
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
|
||||
github.com/aws/aws-sdk-go-v2/internal/timeconv
|
||||
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12
|
||||
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14
|
||||
## explicit; go 1.15
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources
|
||||
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6
|
||||
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8
|
||||
## explicit; go 1.15
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
|
||||
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6
|
||||
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8
|
||||
## explicit; go 1.15
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
|
||||
# github.com/aws/aws-sdk-go-v2/service/sts v1.16.7
|
||||
# github.com/aws/aws-sdk-go-v2/service/sts v1.16.9
|
||||
## explicit; go 1.15
|
||||
github.com/aws/aws-sdk-go-v2/service/sts
|
||||
github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
|
||||
github.com/aws/aws-sdk-go-v2/service/sts/types
|
||||
# github.com/aws/smithy-go v1.11.3
|
||||
# github.com/aws/smithy-go v1.12.0
|
||||
## explicit; go 1.15
|
||||
github.com/aws/smithy-go
|
||||
github.com/aws/smithy-go/document
|
||||
@ -229,7 +229,7 @@ github.com/google/go-cmp/cmp/internal/value
|
||||
# github.com/google/gofuzz v1.1.0
|
||||
## explicit; go 1.12
|
||||
github.com/google/gofuzz
|
||||
# github.com/google/uuid v1.1.2
|
||||
# github.com/google/uuid v1.3.0
|
||||
## explicit
|
||||
github.com/google/uuid
|
||||
# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
@ -262,7 +262,7 @@ github.com/hashicorp/go-multierror
|
||||
## explicit; go 1.13
|
||||
github.com/hashicorp/go-plugin
|
||||
github.com/hashicorp/go-plugin/internal/plugin
|
||||
# github.com/hashicorp/go-retryablehttp v0.6.6
|
||||
# github.com/hashicorp/go-retryablehttp v0.7.0
|
||||
## explicit; go 1.13
|
||||
github.com/hashicorp/go-retryablehttp
|
||||
# github.com/hashicorp/go-rootcerts v1.0.2
|
||||
@ -271,7 +271,7 @@ github.com/hashicorp/go-rootcerts
|
||||
# github.com/hashicorp/go-secure-stdlib/mlock v0.1.1
|
||||
## explicit; go 1.16
|
||||
github.com/hashicorp/go-secure-stdlib/mlock
|
||||
# github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5
|
||||
# github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6
|
||||
## explicit; go 1.16
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil
|
||||
# github.com/hashicorp/go-secure-stdlib/strutil v0.1.2
|
||||
@ -305,11 +305,11 @@ github.com/hashicorp/hcl/json/token
|
||||
## explicit; go 1.13
|
||||
github.com/hashicorp/vault/command/agent/auth
|
||||
github.com/hashicorp/vault/command/agent/auth/kubernetes
|
||||
# github.com/hashicorp/vault/api v1.6.0
|
||||
## explicit; go 1.13
|
||||
# github.com/hashicorp/vault/api v1.7.2
|
||||
## explicit; go 1.17
|
||||
github.com/hashicorp/vault/api
|
||||
# github.com/hashicorp/vault/sdk v0.5.0
|
||||
## explicit; go 1.16
|
||||
# github.com/hashicorp/vault/sdk v0.5.1
|
||||
## explicit; go 1.17
|
||||
github.com/hashicorp/vault/sdk/helper/certutil
|
||||
github.com/hashicorp/vault/sdk/helper/compressutil
|
||||
github.com/hashicorp/vault/sdk/helper/consts
|
||||
@ -502,7 +502,7 @@ github.com/spf13/cobra
|
||||
# github.com/spf13/pflag v1.0.5
|
||||
## explicit; go 1.12
|
||||
github.com/spf13/pflag
|
||||
# github.com/stretchr/testify v1.7.2
|
||||
# github.com/stretchr/testify v1.8.0
|
||||
## explicit; go 1.13
|
||||
github.com/stretchr/testify/assert
|
||||
github.com/stretchr/testify/require
|
||||
@ -1185,11 +1185,12 @@ k8s.io/component-base/version
|
||||
k8s.io/component-helpers/node/util/sysctl
|
||||
k8s.io/component-helpers/scheduling/corev1
|
||||
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
|
||||
# k8s.io/klog/v2 v2.60.1
|
||||
# k8s.io/klog/v2 v2.70.1
|
||||
## explicit; go 1.13
|
||||
k8s.io/klog/v2
|
||||
k8s.io/klog/v2/internal/buffer
|
||||
k8s.io/klog/v2/internal/clock
|
||||
k8s.io/klog/v2/internal/dbg
|
||||
k8s.io/klog/v2/internal/serialize
|
||||
k8s.io/klog/v2/internal/severity
|
||||
# k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42
|
||||
|
Loading…
Reference in New Issue
Block a user