diff --git a/.commitlintrc.yml b/.commitlintrc.yml index f63f4f828..291fb3fed 100644 --- a/.commitlintrc.yml +++ b/.commitlintrc.yml @@ -41,3 +41,4 @@ rules: - rebase - revert - util + - nfs diff --git a/.github/workflows/retest.yml b/.github/workflows/retest.yml index 3352fc1a8..0ff69338b 100644 --- a/.github/workflows/retest.yml +++ b/.github/workflows/retest.yml @@ -12,7 +12,7 @@ jobs: # path to the retest action - uses: ceph/ceph-csi/actions/retest@devel with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.CEPH_CSI_BOT_TOKEN }} required-label: "ci/retry/e2e" max-retry: "5" required-approve-count: "2" diff --git a/.mergify.yml b/.mergify.yml index 2ef06351d..26576ceae 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -309,6 +309,13 @@ pull_request_rules: label: add: - component/cephfs + - name: title contains NFS + conditions: + - "title~=nfs: " + actions: + label: + add: + - component/nfs - name: title contains RBD conditions: - "title~=rbd: " diff --git a/Makefile b/Makefile index b0c1c8023..3fa146818 100644 --- a/Makefile +++ b/Makefile @@ -46,22 +46,19 @@ endif GO_PROJECT=github.com/ceph/ceph-csi +CEPH_VERSION ?= $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION}) +# TODO: ceph_preview tag may be removed with go-ceph 0.16.0 +GO_TAGS_LIST ?= $(CEPH_VERSION) ceph_preview + # go build flags LDFLAGS ?= LDFLAGS += -X $(GO_PROJECT)/internal/util.GitCommit=$(GIT_COMMIT) # CSI_IMAGE_VERSION will be considered as the driver version LDFLAGS += -X $(GO_PROJECT)/internal/util.DriverVersion=$(CSI_IMAGE_VERSION) +GO_TAGS ?= -tags=$(shell echo $(GO_TAGS_LIST) | tr ' ' ',') BASE_IMAGE ?= $(shell . $(CURDIR)/build.env ; echo $${BASE_IMAGE}) -ifndef CEPH_VERSION - CEPH_VERSION = $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION}) -endif -ifdef CEPH_VERSION - # pass -tags to go commands (for go-ceph build constraints) - GO_TAGS = -tags=$(CEPH_VERSION) -endif - # passing TARGET=static-check on the 'make containerized-test' or 'make # containerized-build' commandline will run the selected target instead of # 'make test' in the container. Obviously other targets can be passed as well, @@ -109,8 +106,12 @@ mod-check: check-env @echo 'running: go mod verify' @go mod verify && [ "$(shell sha512sum go.mod)" = "`sha512sum go.mod`" ] || ( echo "ERROR: go.mod was modified by 'go mod verify'" && false ) -scripts/golangci.yml: build.env scripts/golangci.yml.in - sed "s/@@CEPH_VERSION@@/$(CEPH_VERSION)/g" < scripts/golangci.yml.in > scripts/golangci.yml +scripts/golangci.yml: scripts/golangci.yml.in + rm -f scripts/golangci.yml.buildtags.in + for tag in $(GO_TAGS_LIST); do \ + echo " - $$tag" >> scripts/golangci.yml.buildtags.in ; \ + done + sed "/@@BUILD_TAGS@@/r scripts/golangci.yml.buildtags.in" scripts/golangci.yml.in | sed '/@@BUILD_TAGS@@/d' > scripts/golangci.yml go-lint: scripts/golangci.yml ./scripts/lint-go.sh diff --git a/README.md b/README.md index ddb71fe7f..d3ddc28c0 100644 --- a/README.md +++ b/README.md @@ -105,6 +105,10 @@ for its support details. 
 | | Provision volume from another volume | GA | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |
 | | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
 | | Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
+| NFS | Dynamically provision, de-provision File mode RWO volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 |
+| | Dynamically provision, de-provision File mode RWX volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 |
+| | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 |
+| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.6.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 |
 
 `NOTE`: The `Alpha` status reflects possible non-backward compatible changes in
 the future, and is thus not recommended
diff --git a/actions/retest/main.go b/actions/retest/main.go
index fa595ca9e..a3b85042e 100644
--- a/actions/retest/main.go
+++ b/actions/retest/main.go
@@ -143,7 +143,7 @@ func main() {
 	}
 
 	statusList := filterStatusList(rs)
-
+	failedTestFound := false
 	for _, r := range statusList {
 		log.Printf("found context %s with status %s\n", r.GetContext(), r.GetState())
 		if contains([]string{"failed", "failure"}, r.GetState()) {
@@ -176,6 +176,20 @@ func main() {
 				log.Printf("failed to create comment %v\n", err)
 				continue
 			}
+			failedTestFound = true
+		}
+	}
+
+	if failedTestFound {
+		// comment `@Mergifyio refresh` so Mergify adds the PR back into the queue.
+		msg := "@Mergifyio refresh"
+		comment := &github.IssueComment{
+			Body: github.String(msg),
+		}
+		_, _, err = c.client.Issues.CreateComment(context.TODO(), c.owner, c.repo, prNumber, comment)
+		if err != nil {
+			log.Printf("failed to create comment %q: %v\n", msg, err)
+			continue
+		}
 	}
 }
diff --git a/api/deploy/kubernetes/nfs/csidriver.go b/api/deploy/kubernetes/nfs/csidriver.go
new file mode 100644
index 000000000..4aef84787
--- /dev/null
+++ b/api/deploy/kubernetes/nfs/csidriver.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nfs
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"text/template"
+
+	"github.com/ghodss/yaml"
+	storagev1 "k8s.io/api/storage/v1"
+)
+
+//go:embed csidriver.yaml
+var csiDriver string
+
+type CSIDriverValues struct {
+	Name string
+}
+
+var CSIDriverDefaults = CSIDriverValues{
+	Name: "nfs.csi.ceph.com",
+}
+
+// NewCSIDriver takes a driver name from the CSIDriverValues struct and
+// replaces the value in the template. A CSIDriver object is returned which can
+// be created in the Kubernetes cluster.
+func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) {
+	data, err := NewCSIDriverYAML(values)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &storagev1.CSIDriver{}
+	err = yaml.Unmarshal([]byte(data), driver)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert YAML to %T: %w", driver, err)
+	}
+
+	return driver, nil
+}
+
+// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and
+// replaces the value in the template. A CSIDriver object in YAML is returned
+// which can be created in the Kubernetes cluster.
+func NewCSIDriverYAML(values CSIDriverValues) (string, error) {
+	var buf bytes.Buffer
+
+	tmpl, err := template.New("CSIDriver").Parse(csiDriver)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse template: %w", err)
+	}
+	err = tmpl.Execute(&buf, values)
+	if err != nil {
+		return "", fmt.Errorf("failed to replace values in template: %w", err)
+	}
+
+	return buf.String(), nil
+}
diff --git a/api/deploy/kubernetes/nfs/csidriver.yaml b/api/deploy/kubernetes/nfs/csidriver.yaml
new file mode 100644
index 000000000..97afbc50e
--- /dev/null
+++ b/api/deploy/kubernetes/nfs/csidriver.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: "{{ .Name }}"
+spec:
+  attachRequired: false
+  volumeLifecycleModes:
+    - Persistent
diff --git a/api/deploy/kubernetes/nfs/csidriver_test.go b/api/deploy/kubernetes/nfs/csidriver_test.go
new file mode 100644
index 000000000..c7239807f
--- /dev/null
+++ b/api/deploy/kubernetes/nfs/csidriver_test.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package nfs + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewCSIDriver(t *testing.T) { + driver, err := NewCSIDriver(CSIDriverDefaults) + + require.NoError(t, err) + require.NotNil(t, driver) + require.Equal(t, driver.Name, CSIDriverDefaults.Name) +} + +func TestNewCSIDriverYAML(t *testing.T) { + yaml, err := NewCSIDriverYAML(CSIDriverDefaults) + + require.NoError(t, err) + require.NotEqual(t, "", yaml) +} diff --git a/build.env b/build.env index 9398d56c5..66ce0cf6b 100644 --- a/build.env +++ b/build.env @@ -38,7 +38,7 @@ SNAPSHOT_VERSION=v5.0.1 HELM_VERSION=v3.1.2 # minikube settings -MINIKUBE_VERSION=v1.25.0 +MINIKUBE_VERSION=v1.25.2 VM_DRIVER=none CHANGE_MINIKUBE_NONE_USER=true diff --git a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml index f0fe4fe58..b2244d195 100644 --- a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml +++ b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml @@ -126,6 +126,8 @@ spec: mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + - name: ceph-csi-mountinfo + mountPath: /csi/mountinfo resources: {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{- if .Values.nodeplugin.httpMetrics.enabled }} @@ -207,6 +209,10 @@ spec: emptyDir: { medium: "Memory" } + - name: ceph-csi-mountinfo + hostPath: + path: {{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}/mountinfo + type: DirectoryOrCreate {{- if .Values.nodeplugin.affinity }} affinity: {{ toYaml .Values.nodeplugin.affinity | indent 8 -}} diff --git a/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml b/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml index 90a1e311f..120d9627c 100644 --- a/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml +++ b/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml @@ -133,6 +133,9 @@ spec: mountPath: /tmp/csi/keys - name: ceph-logdir mountPath: /var/log/ceph + - name: oidc-token + mountPath: /run/secrets/tokens + readOnly: true resources: {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{- if .Values.nodeplugin.httpMetrics.enabled }} @@ -221,6 +224,13 @@ spec: emptyDir: { medium: "Memory" } + - name: oidc-token + projected: + sources: + - serviceAccountToken: + path: oidc-token + expirationSeconds: 3600 + audience: ceph-csi-kms {{- if .Values.nodeplugin.affinity }} affinity: {{ toYaml .Values.nodeplugin.affinity | indent 8 -}} diff --git a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml index 02616ffa7..b3b09160d 100644 --- a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml @@ -183,6 +183,9 @@ spec: mountPath: /etc/ceph-csi-encryption-kms-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + - name: oidc-token + mountPath: /run/secrets/tokens + readOnly: true resources: {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{- if .Values.provisioner.deployController }} @@ -271,6 +274,13 @@ spec: emptyDir: { medium: "Memory" } + - name: oidc-token + projected: + sources: + - serviceAccountToken: + path: oidc-token + expirationSeconds: 3600 + audience: ceph-csi-kms {{- if .Values.provisioner.affinity }} affinity: {{ toYaml .Values.provisioner.affinity | indent 8 -}} diff --git a/cmd/cephcsi.go b/cmd/cephcsi.go index f419790b0..c36e0cb7c 100644 --- a/cmd/cephcsi.go +++ b/cmd/cephcsi.go @@ -27,6 +27,7 @@ import ( 
"github.com/ceph/ceph-csi/internal/controller" "github.com/ceph/ceph-csi/internal/controller/persistentvolume" "github.com/ceph/ceph-csi/internal/liveness" + nfsdriver "github.com/ceph/ceph-csi/internal/nfs/driver" rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log" @@ -37,11 +38,13 @@ import ( const ( rbdType = "rbd" cephFSType = "cephfs" + nfsType = "nfs" livenessType = "liveness" controllerType = "controller" rbdDefaultName = "rbd.csi.ceph.com" cephFSDefaultName = "cephfs.csi.ceph.com" + nfsDefaultName = "nfs.csi.ceph.com" livenessDefaultName = "liveness.csi.ceph.com" pollTime = 60 // seconds @@ -58,7 +61,7 @@ var conf util.Config func init() { // common flags - flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|liveness|controller]") + flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|nfs|liveness|controller]") flag.StringVar(&conf.Endpoint, "endpoint", "unix:///tmp/csi.sock", "CSI endpoint") flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver") flag.StringVar(&conf.DriverNamespace, "drivernamespace", defaultNS, "namespace in which driver is deployed") @@ -149,6 +152,8 @@ func getDriverName() string { return rbdDefaultName case cephFSType: return cephFSDefaultName + case nfsType: + return nfsDefaultName case livenessType: return livenessDefaultName default: @@ -156,16 +161,20 @@ func getDriverName() string { } } +func printVersion() { + fmt.Println("Cephcsi Version:", util.DriverVersion) + fmt.Println("Git Commit:", util.GitCommit) + fmt.Println("Go Version:", runtime.Version()) + fmt.Println("Compiler:", runtime.Compiler) + fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH) + if kv, err := util.GetKernelVersion(); err == nil { + fmt.Println("Kernel:", kv) + } +} + func main() { if conf.Version { - fmt.Println("Cephcsi Version:", util.DriverVersion) - fmt.Println("Git Commit:", util.GitCommit) - fmt.Println("Go Version:", runtime.Version()) - fmt.Println("Compiler:", runtime.Compiler) - fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH) - if kv, err := util.GetKernelVersion(); err == nil { - fmt.Println("Kernel:", kv) - } + printVersion() os.Exit(0) } log.DefaultLog("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit) @@ -229,6 +238,10 @@ func main() { driver := cephfs.NewDriver() driver.Run(&conf) + case nfsType: + driver := nfsdriver.NewDriver() + driver.Run(&conf) + case livenessType: liveness.Run(&conf) diff --git a/deploy/Makefile b/deploy/Makefile index 9d6decc4c..e42d22c32 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -15,12 +15,16 @@ .PHONY: all all: \ scc.yaml \ + nfs/kubernetes/csidriver.yaml \ rbd/kubernetes/csidriver.yaml \ rbd/kubernetes/csi-config-map.yaml scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go $(MAKE) -C ../tools generate-deploy +nfs/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/nfs/csidriver.yaml ../api/deploy/kubernetes/nfs/csidriver.go + $(MAKE) -C ../tools generate-deploy + rbd/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/rbd/csidriver.yaml ../api/deploy/kubernetes/rbd/csidriver.go $(MAKE) -C ../tools generate-deploy diff --git a/deploy/cephcsi/image/Dockerfile b/deploy/cephcsi/image/Dockerfile index c9adc5241..311c6f5be 100644 --- a/deploy/cephcsi/image/Dockerfile +++ b/deploy/cephcsi/image/Dockerfile @@ -33,6 +33,8 @@ RUN dnf -y install \ /usr/bin/cc \ make \ git \ + && dnf clean all \ + && rm -rf /var/cache/yum \ && true ENV 
GOROOT=${GOROOT} \ diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index f044fc958..5e1d0f9d6 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -100,6 +100,8 @@ spec: mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + - name: ceph-csi-mountinfo + mountPath: /csi/mountinfo - name: liveness-prometheus securityContext: privileged: true @@ -164,6 +166,10 @@ spec: emptyDir: { medium: "Memory" } + - name: ceph-csi-mountinfo + hostPath: + path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo + type: DirectoryOrCreate --- # This is a service to expose the liveness metrics apiVersion: v1 diff --git a/deploy/nfs/kubernetes/csidriver.yaml b/deploy/nfs/kubernetes/csidriver.yaml new file mode 100644 index 000000000..bfa42f6ae --- /dev/null +++ b/deploy/nfs/kubernetes/csidriver.yaml @@ -0,0 +1,16 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "nfs.csi.ceph.com" +spec: + attachRequired: false + volumeLifecycleModes: + - Persistent diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml index 5616d878a..6c70f7ec5 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml @@ -163,6 +163,9 @@ spec: mountPath: /tmp/csi/keys - name: ceph-config mountPath: /etc/ceph/ + - name: oidc-token + mountPath: /run/secrets/tokens + readOnly: true - name: csi-rbdplugin-controller # for stable functionality replace canary with latest release version image: quay.io/cephcsi/cephcsi:canary @@ -231,3 +234,10 @@ spec: emptyDir: { medium: "Memory" } + - name: oidc-token + projected: + sources: + - serviceAccountToken: + path: oidc-token + expirationSeconds: 3600 + audience: ceph-csi-kms diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml index a7d5a3399..e9117e9ff 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml @@ -118,6 +118,9 @@ spec: mountPath: /var/log/ceph - name: ceph-config mountPath: /etc/ceph/ + - name: oidc-token + mountPath: /run/secrets/tokens + readOnly: true - name: liveness-prometheus securityContext: privileged: true @@ -189,6 +192,13 @@ spec: emptyDir: { medium: "Memory" } + - name: oidc-token + projected: + sources: + - serviceAccountToken: + path: oidc-token + expirationSeconds: 3600 + audience: ceph-csi-kms --- # This is a service to expose the liveness metrics apiVersion: v1 diff --git a/docs/ceph-fuse-corruption.md b/docs/ceph-fuse-corruption.md new file mode 100644 index 000000000..ad83e0a4e --- /dev/null +++ b/docs/ceph-fuse-corruption.md @@ -0,0 +1,45 @@ +# ceph-fuse: detection of corrupted mounts and their recovery + +Mounts managed by ceph-fuse may get corrupted by e.g. the ceph-fuse process +exiting abruptly, or its parent Node Plugin container being terminated, taking +down its child processes with it. 
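+
+The "Detection" section below lists the exact `stat()` errors that identify
+such a corrupted mountpoint. As a minimal sketch, assuming a hypothetical
+standalone helper (this is not the actual Ceph-CSI code), the check could look
+like this:
+
+```go
+package mountcheck
+
+import (
+	"errors"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// corruptedErrnos lists the stat() errors that identify a broken ceph-fuse
+// mountpoint (see "Detection" below).
+var corruptedErrnos = []error{
+	unix.ENOTCONN, unix.ESTALE, unix.EIO, unix.EACCES, unix.EHOSTDOWN,
+}
+
+// isCorruptedMount stat()s the given mountpoint and reports whether the call
+// failed with one of the errnos listed above.
+func isCorruptedMount(path string) bool {
+	if _, err := os.Stat(path); err != nil {
+		for _, errno := range corruptedErrnos {
+			if errors.Is(err, errno) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+```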
+
+Such a corrupted mount may manifest in affected workloads like so:
+
+```
+# mount | grep fuse
+ceph-fuse on /cephfs-share type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
+# ls /cephfs-share
+ls: /cephfs-share: Socket not connected
+```
+
+or,
+
+```
+# stat /home/kubelet/pods/ae344b80-3b07-4589-b1a1-ca75fa9debf2/volumes/kubernetes.io~csi/pvc-ec69de59-7823-4840-8eee-544f8261fef0/mount
+stat: cannot stat '/home/kubelet/pods/ae344b80-3b07-4589-b1a1-ca75fa9debf2/volumes/kubernetes.io~csi/pvc-ec69de59-7823-4840-8eee-544f8261fef0/mount': Transport endpoint is not connected
+```
+
+This feature allows the CephFS CSI plugin to detect whether a ceph-fuse mount
+is corrupted during the volume publishing phase, and to attempt to recover it
+for the newly scheduled pod. Pods that already reside on a node whose
+ceph-fuse mountpoints were broken may still need to be restarted, however.
+
+## Detection
+
+A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
+following errors:
+
+* `ENOTCONN`
+* `ESTALE`
+* `EIO`
+* `EACCES`
+* `EHOSTDOWN`
+
+## Recovery
+
+Once a mountpoint corruption is detected, its recovery is performed by
+remounting the volume associated with it.
+
+Recovery is attempted only if the `/csi/mountinfo` directory is made available
+to the CephFS CSI plugin (available by default in the Helm chart and
+Kubernetes manifests).
diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index 166f89f2e..0c90bf644 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -382,6 +382,34 @@ the AWS KMS is expected to contain:
 This Secret is expected to be created by the administrator who deployed
 Ceph-CSI.
 
+#### Configuring Amazon KMS with Amazon STS
+
+Ceph-CSI can be configured to use
+[Amazon STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html)
+to fetch the credentials for accessing Amazon KMS, when the Kubernetes cluster
+is configured with an OIDC identity provider. All other functionality is the
+same as with [Amazon KMS encryption](#configuring-amazon-kms).
+
+There are a few settings that need to be included in the [KMS configuration
+file](../examples/kms/vault/kms-config.yaml):
+
+1. `encryptionKMSType`: should be set to `aws-sts-metadata`.
+1. `secretName`: name of the Kubernetes Secret (in the Namespace where the
+   PVC is created) which contains the credentials for communicating with
+   AWS. This defaults to `ceph-csi-aws-credentials`.
+
+The [Secret with credentials](../examples/kms/vault/aws-sts-credentials.yaml) for
+the AWS KMS is expected to contain:
+
+1. `awsRoleARN`: ARN of the role that will be used to fetch credentials from
+   AWS STS and to access AWS KMS for encryption.
+1. `awsCMKARN`: ARN of the Customer Master Key, the key used to encrypt the
+   passphrase.
+1. `awsRegion`: the region where the AWS STS and KMS services are available.
+
+This Secret is expected to be created by the tenant/user in each namespace where
+Ceph-CSI is used to create encrypted RBD volumes.
+
 ### Encryption prerequisites
 
 In order for encryption to work you need to make sure that `dm-crypt` kernel
diff --git a/docs/design/proposals/nfs.md b/docs/design/proposals/nfs.md
new file mode 100644
index 000000000..2ce9d7436
--- /dev/null
+++ b/docs/design/proposals/nfs.md
@@ -0,0 +1,70 @@
+# Dynamic provisioning of NFS volumes
+
+Ceph has [support for NFS-Ganesha to export directories][ceph_mgr_nfs] on
+CephFS. This can be used to export CephFS based volumes over NFS.
+
+## Node-Plugin for mounting NFS-exports
+
+The Kubernetes CSI community provides and maintains an [NFS CSI][nfs_csi]
+driver. This driver can be used as a Node-Plugin so that NFS CSI volumes
+can be mounted. When a CSI volume has the `server` and `share`
+[parameters][nfs_csi_params], the Node-Plugin will be able to mount the
+NFS-export.
+
+## Exporting CephFS based volumes over NFS
+
+Ceph-CSI already creates CephFS volumes that can be mounted over the native
+CephFS protocol. A new provisioner in Ceph-CSI can create CephFS volumes, and
+include the required [NFS CSI parameters][nfs_csi_params] so that the [NFS CSI
+driver][nfs_csi] can mount the CephFS volume over NFS.
+
+The provisioner that handles the CSI requests for NFS volumes can call the
+[Ceph Mgr commands to export/unexport][ceph_mgr_nfs] the CephFS volumes. The
+CephFS volumes would be internally managed by the NFS provisioner, and only be
+exposed as NFS CSI volumes towards the consumers.
+
+### `CreateVolume` CSI operation
+
+When the Ceph-CSI NFS provisioner is requested to create an NFS CSI volume,
+the following steps need to be taken:
+
+1. create a CephFS volume, using the CephFS `CreateVolume` call or other
+   internal API
+1. call the Ceph Mgr API to export the CephFS volume with NFS-Ganesha
+1. return the NFS CSI volume, with `server` and `share` parameters (other
+   parameters that are useful for CephFS volume management may be kept)
+
+The 2nd step requires an NFS-cluster name for the Ceph Mgr call(s). The name
+of the NFS-cluster as managed by Ceph should be provided in the parameters of
+the `CreateVolume` operation. For Kubernetes that means the parameter is set
+as an option in the `StorageClass`.
+
+The `server` parameter of the volume is another option that is managed by the
+Ceph (or Rook) infrastructure. This parameter is also required to be provided
+in the `CreateVolume` parameters.
+
+Removing the NFS-export for the volume (or other operations) requires the name
+of the NFS-cluster, as it is needed for the Ceph Mgr API. Like other
+parameters of the CephFS volume, the NFS-cluster name will therefore be stored
+in the OMAP journalling.
+
+### `DeleteVolume` CSI operation
+
+The `DeleteVolume` operation only receives the `volume_id` parameter, which
+is to be used by the CSI Controller (provisioner) to locate the backing volume.
+The `DeleteVolume` operation for the CephFS provisioner already knows how to
+delete volumes by ID.
+
+In order to remove the exported volume from the NFS-cluster, the operation
+needs to fetch the name of the NFS-cluster from the journal where it was stored
+during `CreateVolume`.
+
+### Additional CSI operations
+
+`CreateVolume` and `DeleteVolume` are the only required operations for the CSI
+Controller (provisioner). Additional features, as they are supported by
+CephFS, can forward the operations to the CephFS provisioner at a later time.
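+
+To illustrate the export step of `CreateVolume`, a rough sketch of how the
+provisioner could call the Ceph Mgr through go-ceph follows. This is only a
+sketch: the JSON field names are assumptions modelled after the Pacific
+`ceph nfs export create cephfs` CLI, not a confirmed Mgr API.
+
+```go
+package nfs
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/ceph/go-ceph/rados"
+)
+
+// createNFSExport asks the Ceph Mgr to export a CephFS volume over
+// NFS-Ganesha.
+func createNFSExport(conn *rados.Conn, fsName, nfsCluster, pseudoPath string) error {
+	cmd, err := json.Marshal(map[string]string{
+		"prefix":    "nfs export create cephfs",
+		"fsname":    fsName,     // CephFS filesystem backing the volume
+		"clusterid": nfsCluster, // NFS-cluster name from the StorageClass
+		"binding":   pseudoPath, // pseudo-path the volume is exported on
+	})
+	if err != nil {
+		return err
+	}
+
+	_, status, err := conn.MgrCommand([][]byte{cmd})
+	if err != nil {
+		return fmt.Errorf("mgr command failed (%s): %w", status, err)
+	}
+
+	return nil
+}
+```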
+ +[ceph_mgr_nfs]: https://docs.ceph.com/en/latest/mgr/nfs/ +[nfs_csi]: https://github.com/kubernetes-csi/csi-driver-nfs +[nfs_csi_params]: https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/docs/driver-parameters.md diff --git a/docs/static-pvc.md b/docs/static-pvc.md index 82e3ccda2..23aa14b58 100644 --- a/docs/static-pvc.md +++ b/docs/static-pvc.md @@ -228,7 +228,7 @@ static CephFS PV | :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created duing ceph-csi deployment) | Yes | | fsName | CephFS filesystem name into which the subvolume should be created/present | Yes | -| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes | +| staticVolume | Value must be set to `true` to mount and unmount static cephFS PVC | Yes | | rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes | **Note** ceph-csi does not supports CephFS subvolume deletion for static PV. diff --git a/e2e/cephfs.go b/e2e/cephfs.go index afb3a516f..b59827b9d 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -280,6 +280,7 @@ var _ = Describe("cephfs", func() { It("Test CephFS CSI", func() { pvcPath := cephFSExamplePath + "pvc.yaml" appPath := cephFSExamplePath + "pod.yaml" + deplPath := cephFSExamplePath + "deployment.yaml" appRWOPPath := cephFSExamplePath + "pod-rwop.yaml" pvcClonePath := cephFSExamplePath + "pvc-restore.yaml" pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml" @@ -504,6 +505,134 @@ var _ = Describe("cephfs", func() { } }) + By("verifying that ceph-fuse recovery works for new pods", func() { + err := deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, true, map[string]string{ + "mounter": "fuse", + }) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + replicas := int32(2) + pvc, depl, err := validatePVCAndDeploymentAppBinding( + f, pvcPath, deplPath, f.UniqueName, &replicas, deployTimeout, + ) + if err != nil { + e2elog.Failf("failed to create PVC and Deployment: %v", err) + } + deplPods, err := listPods(f, depl.Namespace, &metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", depl.Labels["app"]), + }) + if err != nil { + e2elog.Failf("failed to list pods for Deployment: %v", err) + } + + doStat := func(podName string) (stdErr string, err error) { + _, stdErr, err = execCommandInContainerByPodName( + f, + fmt.Sprintf("stat %s", depl.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath), + depl.Namespace, + podName, + depl.Spec.Template.Spec.Containers[0].Name, + ) + + return stdErr, err + } + ensureStatSucceeds := func(podName string) error { + stdErr, statErr := doStat(podName) + if statErr != nil || stdErr != "" { + return fmt.Errorf( + "expected stat to succeed without error output ; got err %w, stderr %s", + statErr, stdErr, + ) + } + + return nil + } + + pod1Name, pod2Name := deplPods[0].Name, deplPods[1].Name + + // stat() ceph-fuse mountpoints to make sure they are working. + for i := range deplPods { + err = ensureStatSucceeds(deplPods[i].Name) + if err != nil { + e2elog.Failf(err.Error()) + } + } + // Kill ceph-fuse in cephfs-csi node plugin Pods. 
+			nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
+			if err != nil {
+				e2elog.Failf("failed to get node plugin DaemonSet label selector: %v", err)
+			}
+			_, stdErr, err := execCommandInContainer(
+				f, "killall -9 ceph-fuse", cephCSINamespace, "csi-cephfsplugin", &metav1.ListOptions{
+					LabelSelector: nodePluginSelector,
+				},
+			)
+			if err != nil {
+				e2elog.Failf("killall command failed: err %v, stderr %s", err, stdErr)
+			}
+			// Verify that stat()-ing the mountpoint in the second Pod now fails with ENOTCONN.
+			stdErr, err = doStat(pod2Name)
+			if err == nil || !strings.Contains(stdErr, "not connected") {
+				e2elog.Failf(
+					"expected stat to fail with 'Transport endpoint not connected' or 'Socket not connected'; got err %v, stderr %s",
+					err, stdErr,
+				)
+			}
+			// Delete the second Pod. This serves two purposes: it verifies that deleting pods with
+			// corrupted ceph-fuse mountpoints works, and it lets the replicaset controller recreate
+			// the pod with hopefully mounts working again.
+			err = deletePod(pod2Name, depl.Namespace, c, deployTimeout)
+			if err != nil {
+				e2elog.Failf(err.Error())
+			}
+			// Wait for the second Pod to be recreated.
+			err = waitForDeploymentComplete(c, depl.Name, depl.Namespace, deployTimeout)
+			if err != nil {
+				e2elog.Failf(err.Error())
+			}
+			// List the Deployment's pods again to get the name of the new pod.
+			deplPods, err = listPods(f, depl.Namespace, &metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("app=%s", depl.Labels["app"]),
+			})
+			if err != nil {
+				e2elog.Failf("failed to list pods for Deployment: %v", err)
+			}
+			// Reset pod2Name so that a missing new replica is actually detected below.
+			pod2Name = ""
+			for i := range deplPods {
+				if deplPods[i].Name != pod1Name {
+					pod2Name = deplPods[i].Name
+
+					break
+				}
+			}
+			if pod2Name == "" {
+				podNames := make([]string, len(deplPods))
+				for i := range deplPods {
+					podNames[i] = deplPods[i].Name
+				}
+				e2elog.Failf("no new replica found; found replicas %v", podNames)
+			}
+			// Verify that the recreated Pod has its ceph-fuse mount working again.
+			err = ensureStatSucceeds(pod2Name)
+			if err != nil {
+				e2elog.Failf(err.Error())
+			}
+
+			// Delete created resources.
+ err = deletePVCAndDeploymentApp(f, pvc, depl) + if err != nil { + e2elog.Failf("failed to delete PVC and Deployment: %v", err) + } + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + }) + By("create a PVC and bind it to an app", func() { err := createCephfsStorageClass(f.ClientSet, f, false, nil) if err != nil { diff --git a/e2e/pod.go b/e2e/pod.go index 799646636..394b92a18 100644 --- a/e2e/pod.go +++ b/e2e/pod.go @@ -214,6 +214,29 @@ func execCommandInContainer( return stdOut, stdErr, err } +func execCommandInContainerByPodName( + f *framework.Framework, shellCmd, namespace, podName, containerName string, +) (string, string, error) { + cmd := []string{"/bin/sh", "-c", shellCmd} + execOpts := framework.ExecOptions{ + Command: cmd, + PodName: podName, + Namespace: namespace, + ContainerName: containerName, + Stdin: nil, + CaptureStdout: true, + CaptureStderr: true, + PreserveWhitespace: true, + } + + stdOut, stdErr, err := f.ExecWithOptions(execOpts) + if stdErr != "" { + e2elog.Logf("stdErr occurred: %v", stdErr) + } + + return stdOut, stdErr, err +} + func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string, error) { opt := &metav1.ListOptions{ LabelSelector: rookToolBoxPodLabel, diff --git a/e2e/rbd.go b/e2e/rbd.go index fa9020025..ebd15b1fc 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -984,7 +984,7 @@ var _ = Describe("RBD", func() { } app.Namespace = f.UniqueName - err = createPVCAndDeploymentApp(f, "", pvc, app, deployTimeout) + err = createPVCAndDeploymentApp(f, pvc, app, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application: %v", err) } @@ -1014,7 +1014,7 @@ var _ = Describe("RBD", func() { } } - err = deletePVCAndDeploymentApp(f, "", pvc, app) + err = deletePVCAndDeploymentApp(f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application: %v", err) } @@ -1093,7 +1093,7 @@ var _ = Describe("RBD", func() { appClone.Namespace = f.UniqueName appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true - err = createPVCAndDeploymentApp(f, "", pvcClone, appClone, deployTimeout) + err = createPVCAndDeploymentApp(f, pvcClone, appClone, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application: %v", err) } @@ -1131,7 +1131,7 @@ var _ = Describe("RBD", func() { } } - err = deletePVCAndDeploymentApp(f, "", pvcClone, appClone) + err = deletePVCAndDeploymentApp(f, pvcClone, appClone) if err != nil { e2elog.Failf("failed to delete PVC and application: %v", err) } @@ -1217,7 +1217,7 @@ var _ = Describe("RBD", func() { appClone.Namespace = f.UniqueName appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true - err = createPVCAndDeploymentApp(f, "", pvcClone, appClone, deployTimeout) + err = createPVCAndDeploymentApp(f, pvcClone, appClone, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application: %v", err) } @@ -1254,7 +1254,7 @@ var _ = Describe("RBD", func() { e2elog.Failf(stdErr) } } - err = deletePVCAndDeploymentApp(f, "", pvcClone, appClone) + err = deletePVCAndDeploymentApp(f, pvcClone, appClone) if err != nil { e2elog.Failf("failed to delete PVC and application: %v", err) } diff --git a/e2e/utils.go b/e2e/utils.go index 2882fea95..8bc873d5f 100644 --- a/e2e/utils.go 
+++ b/e2e/utils.go
@@ -191,19 +191,12 @@ func createPVCAndApp(
 	return err
 }
 
-// createPVCAndDeploymentApp creates pvc and deployment, if name is not empty
-// same will be set as pvc and app name.
+// createPVCAndDeploymentApp creates a PVC and a Deployment.
 func createPVCAndDeploymentApp(
 	f *framework.Framework,
-	name string,
 	pvc *v1.PersistentVolumeClaim,
 	app *appsv1.Deployment,
 	pvcTimeout int) error {
-	if name != "" {
-		pvc.Name = name
-		app.Name = name
-		app.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
-	}
 	err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
 	if err != nil {
 		return err
@@ -213,19 +206,50 @@ func createPVCAndDeploymentApp(
 	return err
 }
 
-// DeletePVCAndDeploymentApp deletes pvc and deployment, if name is not empty
-// same will be set as pvc and app name.
-func deletePVCAndDeploymentApp(
+// validatePVCAndDeploymentAppBinding creates a PVC and a Deployment, and waits
+// until all of the Deployment's replicas are Running. Use `replicas` to
+// override the default number of replicas in the `deploymentPath` manifest.
+func validatePVCAndDeploymentAppBinding(
 	f *framework.Framework,
-	name string,
-	pvc *v1.PersistentVolumeClaim,
-	app *appsv1.Deployment) error {
-	if name != "" {
-		pvc.Name = name
-		app.Name = name
-		app.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
+	pvcPath string,
+	deploymentPath string,
+	namespace string,
+	replicas *int32,
+	pvcTimeout int,
+) (*v1.PersistentVolumeClaim, *appsv1.Deployment, error) {
+	pvc, err := loadPVC(pvcPath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to load PVC: %w", err)
+	}
+	pvc.Namespace = namespace
+
+	depl, err := loadAppDeployment(deploymentPath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to load Deployment: %w", err)
+	}
+	depl.Namespace = namespace
+	if replicas != nil {
+		depl.Spec.Replicas = replicas
 	}
+	err = createPVCAndDeploymentApp(f, pvc, depl, pvcTimeout)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = waitForDeploymentComplete(f.ClientSet, depl.Name, depl.Namespace, deployTimeout)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return pvc, depl, nil
+}
+
+// deletePVCAndDeploymentApp deletes the PVC and the Deployment.
+func deletePVCAndDeploymentApp(
+	f *framework.Framework,
+	pvc *v1.PersistentVolumeClaim,
+	app *appsv1.Deployment) error {
 	err := deleteDeploymentApp(f.ClientSet, app.Name, app.Namespace, deployTimeout)
 	if err != nil {
 		return err
diff --git a/examples/kms/vault/aws-sts-credentials.yaml b/examples/kms/vault/aws-sts-credentials.yaml
new file mode 100644
index 000000000..5f8436efd
--- /dev/null
+++ b/examples/kms/vault/aws-sts-credentials.yaml
@@ -0,0 +1,13 @@
+---
+# This is an example Kubernetes Secret that can be created in the Kubernetes
+# Namespace where Ceph-CSI is deployed. The contents of this Secret will be
+# used to fetch credentials from Amazon STS and use them to connect to the
+# Amazon KMS.
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ceph-csi-aws-credentials
+stringData:
+  awsRoleARN: "arn:aws:iam::111122223333:role/aws-sts-kms"
+  awsCMKARN: "arn:aws:kms:us-west-2:111123:key/1234cd-12ab-34cd-56ef-123590ab"
+  awsRegion: "us-west-2"
diff --git a/examples/kms/vault/csi-kms-connection-details.yaml b/examples/kms/vault/csi-kms-connection-details.yaml
index a835d34e9..23e896a5b 100644
--- a/examples/kms/vault/csi-kms-connection-details.yaml
+++ b/examples/kms/vault/csi-kms-connection-details.yaml
@@ -67,7 +67,12 @@ data:
       "IBM_KP_SERVICE_INSTANCE_ID": "7abef064-01dd-4237-9ea5-8b3890970be3",
       "IBM_KP_BASE_URL": "https://us-south.kms.cloud.ibm.com",
       "IBM_KP_TOKEN_URL": "https://iam.cloud.ibm.com/oidc/token",
-      "IBM_KP_REGION": "us-south-2",
+      "IBM_KP_REGION": "us-south-2"
+    }
+  aws-sts-metadata-test: |-
+    {
+      "encryptionKMSType": "aws-sts-metadata",
+      "secretName": "ceph-csi-aws-credentials"
     }
 metadata:
   name: csi-kms-connection-details
diff --git a/examples/kms/vault/kms-config.yaml b/examples/kms/vault/kms-config.yaml
index b77afd37d..01b3f3e37 100644
--- a/examples/kms/vault/kms-config.yaml
+++ b/examples/kms/vault/kms-config.yaml
@@ -96,6 +96,10 @@ data:
         "secretName": "ceph-csi-kp-credentials",
         "keyProtectRegionKey": "us-south-2",
         "keyProtectServiceInstanceID": "7abef064-01dd-4237-9ea5-8b3890970be3"
+      },
+      "aws-sts-metadata-test": {
+        "encryptionKMSType": "aws-sts-metadata",
+        "secretName": "ceph-csi-aws-credentials"
       }
     }
 metadata:
diff --git a/examples/nfs/README.md b/examples/nfs/README.md
new file mode 100644
index 000000000..8b47ddf02
--- /dev/null
+++ b/examples/nfs/README.md
@@ -0,0 +1,70 @@
+# Dynamic provisioning with NFS
+
+The easiest way to try out the examples for dynamic provisioning with NFS is
+to use [Rook Ceph with CephNFS][rook_ceph]. Rook can be used to deploy a Ceph
+cluster. Ceph is able to maintain an NFS-Ganesha service with a few commands,
+making configuration of the Ceph cluster a minimal effort.
+
+## Enabling the Ceph NFS-service
+
+Ceph does not enable the NFS-service by default. In order for Rook Ceph to be
+able to configure NFS-exports, the NFS-service needs to be configured first.
+
+In the [Rook Toolbox][rook_toolbox], run the following commands:
+
+```console
+ceph osd pool create nfs-ganesha
+ceph mgr module enable rook
+ceph mgr module enable nfs
+ceph orch set backend rook
+```
+
+## Create an NFS-cluster
+
+In the directory where this `README` is located, there is an example
+`rook-nfs.yaml` file. This file can be used to create a Ceph managed
+NFS-cluster with the name "my-nfs".
+
+```console
+$ kubectl create -f rook-nfs.yaml
+cephnfs.ceph.rook.io/my-nfs created
+```
+
+The CephNFS resource will create an NFS-Ganesha Pod and Service with label
+`app=rook-ceph-nfs`:
+
+```console
+$ kubectl get pods -l app=rook-ceph-nfs
+NAME                                      READY   STATUS    RESTARTS   AGE
+rook-ceph-nfs-my-nfs-a-5d47f66977-sc2rk   2/2     Running   0          61s
+$ kubectl get service -l app=rook-ceph-nfs
+NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
+rook-ceph-nfs-my-nfs-a   ClusterIP   172.30.218.195   <none>        2049/TCP   2m58s
+```
+
+## Create a StorageClass
+
+The parameters of the StorageClass mostly reflect what CephFS requires to
+connect to the Ceph cluster. All required options are clearly commented in the
+`storageclass.yaml` file.
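+
+For reference, a trimmed-down StorageClass with the values used in this
+walkthrough might look as follows; `clusterID` and `fsName` are placeholders
+that need to be adapted to your cluster, and the NFS-specific parameters are
+explained right below:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: csi-nfs-sc
+provisioner: nfs.csi.ceph.com
+parameters:
+  # CephNFS name from rook-nfs.yaml
+  nfsCluster: my-nfs
+  # CLUSTER-IP of the rook-ceph-nfs Service shown above
+  server: 172.30.218.195
+  # placeholders, see storageclass.yaml for all required options
+  clusterID: <cluster-id>
+  fsName: myfs
+reclaimPolicy: Delete
+```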
+ +In addition to the CephFS parameters, there are: + +- `nfsCluster`: name of the Ceph managed NFS-cluster (here `my-nfs`) +- `server`: hostname/IP/service of the NFS-server (here `172.30.218.195`) + +Edit `storageclass.yaml`, and create the resource: + +```console +$ kubectl create -f storageclass.yaml +storageclass.storage.k8s.io/csi-nfs-sc created +``` + +## TODO: next steps + +- deploy the NFS-provisioner +- deploy the kubernetes-csi/csi-driver-nfs +- create the CSIDriver object + +[rook_ceph]: https://rook.io/docs/rook/latest/ceph-nfs-crd.html +[rook_toolbox]: https://rook.io/docs/rook/latest/ceph-toolbox.html diff --git a/examples/nfs/pod.yaml b/examples/nfs/pod.yaml new file mode 100644 index 000000000..9c1217408 --- /dev/null +++ b/examples/nfs/pod.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: cephcsi-nfs-demo-pod +spec: + containers: + - name: web-server + image: docker.io/library/nginx:latest + volumeMounts: + - name: mypvc + mountPath: /var/lib/www + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: cephcsi-nfs-pvc + readOnly: false diff --git a/examples/nfs/pvc.yaml b/examples/nfs/pvc.yaml new file mode 100644 index 000000000..8f4fca12a --- /dev/null +++ b/examples/nfs/pvc.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cephcsi-nfs-pvc +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + storageClassName: csi-nfs-sc diff --git a/examples/nfs/rook-nfs.yaml b/examples/nfs/rook-nfs.yaml new file mode 100644 index 000000000..18e230ad9 --- /dev/null +++ b/examples/nfs/rook-nfs.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephNFS +metadata: + name: my-nfs + namespace: default +spec: + # For Ceph v15, the rados block is required. It is ignored for Ceph v16. + rados: + # Ceph v16 always uses/expects "nfs-ganesha" + pool: nfs-ganesha + # RADOS namespace where NFS client recovery data is stored in the pool. + # fixed value for Ceph v16: the name of this CephNFS object + namespace: my-nfs + + # Settings for the NFS server + server: + # the number of active NFS servers + active: 1 diff --git a/examples/nfs/storageclass.yaml b/examples/nfs/storageclass.yaml new file mode 100644 index 000000000..aaf7703c1 --- /dev/null +++ b/examples/nfs/storageclass.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-nfs-sc +provisioner: nfs.csi.ceph.com +parameters: + # (required) Name of the NFS-cluster as managed by Ceph. + nfsCluster: + + # (required) Hostname, ip-address or service that points to the Ceph managed + # NFS-server that will be used for mounting the NFS-export. + server: + + # + # The parameters below are standard CephFS options, these are used for + # managing the underlying CephFS volume. + # + + # (required) String representing a Ceph cluster to provision storage from. + # Should be unique across all Ceph clusters in use for provisioning, + # cannot be greater than 36 bytes in length, and should remain immutable for + # the lifetime of the StorageClass in use. 
+ # Ensure to create an entry in the configmap named ceph-csi-config, based on + # csi-config-map-sample.yaml, to accompany the string chosen to + # represent the Ceph cluster in clusterID below + clusterID: + + # (required) CephFS filesystem name into which the volume shall be created + # eg: fsName: myfs + fsName: + + # (optional) Ceph pool into which volume data shall be stored + # pool: + + # The secrets have to contain user and/or Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret + csi.storage.k8s.io/provisioner-secret-namespace: default + csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret + csi.storage.k8s.io/controller-expand-secret-namespace: default + csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret + csi.storage.k8s.io/node-stage-secret-namespace: default + + # (optional) Prefix to use for naming subvolumes. + # If omitted, defaults to "csi-vol-". + volumeNamePrefix: nfs-export- + +reclaimPolicy: Delete +allowVolumeExpansion: false diff --git a/go.mod b/go.mod index c3af45fbd..af7eaf6bc 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,8 @@ go 1.17 require ( github.com/IBM/keyprotect-go-client v0.7.0 - github.com/aws/aws-sdk-go v1.43.8 + github.com/aws/aws-sdk-go v1.43.22 + github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/go-ceph v0.14.0 github.com/container-storage-interface/spec v1.5.0 @@ -13,29 +14,29 @@ require ( github.com/golang/protobuf v1.5.2 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/vault/api v1.4.1 - github.com/kubernetes-csi/csi-lib-utils v0.10.0 + github.com/hashicorp/vault/api v1.5.0 + github.com/kubernetes-csi/csi-lib-utils v0.11.0 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.18.1 + github.com/onsi/gomega v1.19.0 github.com/pborman/uuid v1.2.1 github.com/prometheus/client_golang v1.12.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 - google.golang.org/grpc v1.44.0 - google.golang.org/protobuf v1.27.1 - k8s.io/api v0.23.4 - k8s.io/apimachinery v0.23.4 + google.golang.org/grpc v1.45.0 + google.golang.org/protobuf v1.28.0 + k8s.io/api v0.23.5 + k8s.io/apimachinery v0.23.5 k8s.io/client-go v12.0.0+incompatible - k8s.io/cloud-provider v0.23.4 - k8s.io/klog/v2 v2.40.1 + k8s.io/cloud-provider v0.23.5 + k8s.io/klog/v2 v2.60.1 // // when updating k8s.io/kubernetes, make sure to update the replace section too // - k8s.io/kubernetes v1.23.4 - k8s.io/mount-utils v0.23.4 + k8s.io/kubernetes v1.23.5 + k8s.io/mount-utils v0.23.5 k8s.io/utils v0.0.0-20211116205334-6203023598ed sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 ) @@ -43,6 +44,11 @@ require ( require ( github.com/armon/go-metrics v0.3.9 // indirect github.com/armon/go-radix v1.0.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect + github.com/aws/smithy-go v1.11.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.2.0 
// indirect github.com/blang/semver v3.5.1+incompatible // indirect @@ -61,7 +67,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.5 // indirect + github.com/google/go-cmp v0.5.7 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.1.2 // indirect github.com/googleapis/gnostic v0.5.5 // indirect @@ -126,7 +132,7 @@ require ( go.opentelemetry.io/otel/trace v0.20.0 // indirect go.opentelemetry.io/proto/otlp v0.7.0 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect @@ -139,13 +145,13 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/apiserver v0.23.4 // indirect - k8s.io/component-base v0.23.4 // indirect - k8s.io/component-helpers v0.23.4 // indirect + k8s.io/apiserver v0.23.5 // indirect + k8s.io/component-base v0.23.5 // indirect + k8s.io/component-helpers v0.23.5 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect @@ -160,31 +166,31 @@ replace ( // // k8s.io/kubernetes depends on these k8s.io packages, but unversioned // - k8s.io/api => k8s.io/api v0.23.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.23.4 - k8s.io/apiserver => k8s.io/apiserver v0.23.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.4 - k8s.io/client-go => k8s.io/client-go v0.23.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.4 - k8s.io/code-generator => k8s.io/code-generator v0.23.4 - k8s.io/component-base => k8s.io/component-base v0.23.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.23.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.23.4 - k8s.io/cri-api => k8s.io/cri-api v0.23.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.4 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.4 - k8s.io/kubectl => k8s.io/kubectl v0.23.4 - k8s.io/kubelet => k8s.io/kubelet v0.23.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.4 - k8s.io/metrics => k8s.io/metrics v0.23.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.23.4 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.4 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.4 + k8s.io/api => k8s.io/api v0.23.5 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.5 + k8s.io/apimachinery => k8s.io/apimachinery v0.23.5 + 
k8s.io/apiserver => k8s.io/apiserver v0.23.5 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.5 + k8s.io/client-go => k8s.io/client-go v0.23.5 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.5 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.5 + k8s.io/code-generator => k8s.io/code-generator v0.23.5 + k8s.io/component-base => k8s.io/component-base v0.23.5 + k8s.io/component-helpers => k8s.io/component-helpers v0.23.5 + k8s.io/controller-manager => k8s.io/controller-manager v0.23.5 + k8s.io/cri-api => k8s.io/cri-api v0.23.5 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.5 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.5 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.5 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.5 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.5 + k8s.io/kubectl => k8s.io/kubectl v0.23.5 + k8s.io/kubelet => k8s.io/kubelet v0.23.5 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.5 + k8s.io/metrics => k8s.io/metrics v0.23.5 + k8s.io/mount-utils => k8s.io/mount-utils v0.23.5 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.5 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.5 // layeh.com seems to be misbehaving layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 ) diff --git a/go.sum b/go.sum index 3cc47e82b..ac38c1d11 100644 --- a/go.sum +++ b/go.sum @@ -138,8 +138,20 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.43.8 h1:8a/M9C4l5CxFNM6IuNx4F1p+ITJEX12VxWxUQo61cbc= -github.com/aws/aws-sdk-go v1.43.8/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.22 h1:QY9/1TZB73UDEVQ68sUVJXf/7QUiHZl7zbbLF1wpqlc= +github.com/aws/aws-sdk-go v1.43.22/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js= +github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s= +github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= +github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= 
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -437,8 +449,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -635,8 +648,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= -github.com/hashicorp/vault/api v1.4.1 h1:mWLfPT0RhxBitjKr6swieCEP2v5pp/M//t70S3kMLRo= -github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= @@ -729,8 +742,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.10.0 h1:Aqm8X81eCzzfH/bvIEqSWtcbK9HF9NbFk4d+le1snVA= -github.com/kubernetes-csi/csi-lib-utils v0.10.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0= +github.com/kubernetes-csi/csi-lib-utils v0.11.0 h1:FHWOBtAZBA/hVk7v/qaXgG9Sxv0/n06DebPFuDwumqg= +github.com/kubernetes-csi/csi-lib-utils v0.11.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= @@ -852,8 +865,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k 
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -861,8 +874,8 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -1045,8 +1058,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1266,8 +1280,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1619,8 +1634,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= @@ -1628,8 +1643,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1690,28 +1706,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E= -k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= -k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU= -k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= -k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM= -k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apiserver v0.23.4 h1:zNvQlG+C/ERjuUz4p7eY/0IWHaMixRSBoxgmyIdwo9Y= -k8s.io/apiserver v0.23.4/go.mod 
h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= -k8s.io/cli-runtime v0.23.4/go.mod h1:7KywUNTUibmHPqmpDFuRO1kc9RhsufHv2lkjCm2YZyM= -k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= -k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= -k8s.io/cloud-provider v0.23.4 h1:Nx42V7+Vpaad3qZE031MpTfCDl3jeQrX6wuwieES/nc= -k8s.io/cloud-provider v0.23.4/go.mod h1:+RFNcj7DczZJE250/l55hh4Be4tlHkNgdtmI4PzxhJ0= -k8s.io/cluster-bootstrap v0.23.4/go.mod h1:H5UZ3a4ZvjyUIgTgW8VdnN1rm3DsRqhotqK9oDMHU1o= -k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/component-base v0.23.4 h1:SziYh48+QKxK+ykJ3Ejqd98XdZIseVBG7sBaNLPqy6M= -k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= -k8s.io/component-helpers v0.23.4 h1:zCLeBuo3Qs0BqtJu767RXJgs5S9ruFJZcbM1aD+cMmc= -k8s.io/component-helpers v0.23.4/go.mod h1:1Pl7L4zukZ054ElzRbvmZ1FJIU8roBXFOeRFu8zipa4= -k8s.io/controller-manager v0.23.4/go.mod h1:+ednTkO5Z25worecG5ORa7NssZT0cpuVunVHN+24Ccs= -k8s.io/cri-api v0.23.4/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/csi-translation-lib v0.23.4/go.mod h1:hvAm5aoprpfE7p9Xnfe3ObmbhDcYp3U7AZJnVQUlrqw= +k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA= +k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= +k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI= +k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ= +k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0= +k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apiserver v0.23.5 h1:2Ly8oUjz5cnZRn1YwYr+aFgDZzUmEVL9RscXbnIeDSE= +k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw= +k8s.io/cli-runtime v0.23.5/go.mod h1:oY6QDF2qo9xndSq32tqcmRp2UyXssdGrLfjAVymgbx4= +k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8= +k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= +k8s.io/cloud-provider v0.23.5 h1:cf5Il2oV++RtlqgNesHd+tDFtOp85dG0t9KN/pmb71s= +k8s.io/cloud-provider v0.23.5/go.mod h1:xMZFA6pIYKweqTkWCYVgRSVMAjqOvxVr3u/kmfyxvkU= +k8s.io/cluster-bootstrap v0.23.5/go.mod h1:8/Gz6VTOMmEDDhn8U/nx0McnQR4YETAqiYXIlqR8hdQ= +k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE= +k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0= +k8s.io/component-helpers v0.23.5 h1:6uTMNP6xxJrSzYTC7BCcH2S/PbSZGxSUZG0PG+nT4tM= +k8s.io/component-helpers v0.23.5/go.mod h1:5riXJgjTIs+ZB8xnf5M2anZ8iQuq37a0B/0BgoPQuSM= +k8s.io/controller-manager v0.23.5/go.mod h1:n/KRlUzAtkFcZodZ/w0GlQdmErVKh7lS/wS0bbo7W4I= +k8s.io/cri-api v0.23.5/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/csi-translation-lib v0.23.5/go.mod h1:8RyFkoHAJrFU7c7MN1ZUjctm3ZhHclKm1FIHNSyGcuw= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1722,28 +1738,28 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod 
h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4= -k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.23.4/go.mod h1:hpmPi4oaLBe014CkBCqzBYWok64H2C7Ka6FBLJvHgkg= -k8s.io/kube-controller-manager v0.23.4/go.mod h1:r4Cn9Y8t3GyMPrPnOGCDRpeyEKVOITuwHJ7pIWXH0IY= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-aggregator v0.23.5/go.mod h1:3ynYx07Co6dzjpKPgipM+1/Mt2Jcm7dY++cRlKLr5s8= +k8s.io/kube-controller-manager v0.23.5/go.mod h1:Pkg5lIk9YG9Qjj4F7Dn0gi6/k8cEYP63oLdgrlrrtu4= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-proxy v0.23.4/go.mod h1:uZBvTCJYVBqnlyup3JpXaMmqrlkzHjcakHhf7ojYUKk= -k8s.io/kube-scheduler v0.23.4/go.mod h1:KNKYvMZ8dhoMLYygiEMEK+JKFQ2fhW2CLj7B5zEQ/68= -k8s.io/kubectl v0.23.4 h1:mAa+zEOlyZieecEy+xSrhjkpMcukYyHWzcNdX28dzMY= -k8s.io/kubectl v0.23.4/go.mod h1:Dgb0Rvx/8JKS/C2EuvsNiQc6RZnX0SbHJVG3XUzH6ok= -k8s.io/kubelet v0.23.4 h1:yptgklhQ3dtHHIpH/RgI0861XWoJ9/YIBnnxYS6l8VI= -k8s.io/kubelet v0.23.4/go.mod h1:RjbycP9Wnpbw33G8yFt9E23+pFYxzWy1d8qHU0KVUgg= -k8s.io/kubernetes v1.23.4 h1:25dqAMS96u+9L/A7AHdEW7aMTcmHoQMbMPug6Fa61JE= -k8s.io/kubernetes v1.23.4/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io= -k8s.io/legacy-cloud-providers v0.23.4/go.mod h1:dl0qIfmTyeDpRe/gaudDVnLsykKW2DE7oBWbuJl2Gd8= -k8s.io/metrics v0.23.4/go.mod h1:cl6sY9BdVT3DubbpqnkPIKi6mn/F2ltkU4yH1tEJ3Bo= -k8s.io/mount-utils v0.23.4 h1:tWUj5A0DJ29haMiO7F3pNdP2HwyMWczzvqQmikFc9s8= -k8s.io/mount-utils v0.23.4/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98= -k8s.io/pod-security-admission v0.23.4/go.mod h1:cikO3akkUoTZ8uFhkHdlWp0m3XosiOqssTHb+TfCjLw= -k8s.io/sample-apiserver v0.23.4/go.mod h1:ITqvv82GqqeRue7dmsP7A/As/MHE2v1H3vriNRFv+/U= +k8s.io/kube-proxy v0.23.5/go.mod h1:/yCbRrOHgPCb1g1k4XmMJPmNesfdPhZTGrvwNlNgwo8= +k8s.io/kube-scheduler v0.23.5/go.mod h1:IJGf4WngeoAHLj4ms4n3Poa29ttmaxCXxIqpgU0ky7E= +k8s.io/kubectl v0.23.5 h1:DmDULqCaF4qstj0Im143XmncvqWtJxHzK8IrW2BzlU0= +k8s.io/kubectl v0.23.5/go.mod h1:lLgw7cVY8xbd7o637vOXPca/w6HC205KsPCRDYRCxwE= +k8s.io/kubelet v0.23.5 h1:eCGJ7olStiyF7TYHlUTjpXg2ltw7Bs9OPZcch8HP2Go= +k8s.io/kubelet v0.23.5/go.mod h1:M0aj0gaX+rOaGfCfqkV6P7QbwtMwqbL6RdwviHmnehU= +k8s.io/kubernetes v1.23.5 h1:bxpSv2BKc2MqYRfyqQqLVdodLZ2r+NZ/rEdZXyUAvug= +k8s.io/kubernetes v1.23.5/go.mod h1:avI3LUTUYZugxwh52KMVM7v9ZjB5gYJ6D3FIoZ1SHUo= +k8s.io/legacy-cloud-providers v0.23.5/go.mod h1:IENlwY686f1fbakotgNf7gAQuIyCvOUIAXkPPPE/7KU= +k8s.io/metrics v0.23.5/go.mod h1:WNAtV2a5BYbmDS8+7jSqYYV6E3efuGTpIwJ8PTD1wgs= +k8s.io/mount-utils v0.23.5 h1:MOhJKZTpfC21r5OamKYWMdVNtTMDD9wZfTkLOhI5nuE= +k8s.io/mount-utils v0.23.5/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98= +k8s.io/pod-security-admission v0.23.5/go.mod h1:aSyWfjev8Zil5DaZBZ+ICAObZmZlRqhnAZHxA9r71UI= +k8s.io/sample-apiserver v0.23.5/go.mod h1:m4cnT3HgRY5Dt2AjMVKGnb31D6rGY0B+xpKtRJUUC8w= k8s.io/system-validators v1.6.0/go.mod 
h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1760,8 +1776,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 h1:KQOkVzXrLNb0EP6W0FD6u3CCPAwgXFYwZitbj7K0P0Y= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 h1:+ReKrjTrd57mtAU19BJkxSAaWRIQkFlaWcO6dGFVP1g= sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index 9a35475e8..d40544d1f 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -27,6 +27,7 @@ import ( fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" csicommon "github.com/ceph/ceph-csi/internal/csi-common" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/k8s" "github.com/ceph/ceph-csi/internal/util/log" "github.com/container-storage-interface/spec/lib/go/csi" @@ -148,6 +149,35 @@ func checkContentSource( return nil, nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source %v", volumeSource) } +// checkValidCreateVolumeRequest checks if the request is a valid +// CreateVolumeRequest by inspecting the request parameters. +func checkValidCreateVolumeRequest( + vol, + parentVol *store.VolumeOptions, + pvID *store.VolumeIdentifier, + sID *store.SnapshotIdentifier) error { + switch { + case pvID != nil: + if vol.Size < parentVol.Size { + return fmt.Errorf( + "cannot clone from volume %s: volume size %d is smaller than source volume size %d", + pvID.VolumeID, + vol.Size, + parentVol.Size) + } + case sID != nil: + if vol.Size < parentVol.Size { + return fmt.Errorf( + "cannot restore from snapshot %s: volume size %d is smaller than source volume size %d", + sID.SnapshotID, + vol.Size, + parentVol.Size) + } + } + + return nil +} + // CreateVolume creates a reservation and the volume in backend, if it is not already present.
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity func (cs *ControllerServer) CreateVolume( @@ -199,6 +229,11 @@ func (cs *ControllerServer) CreateVolume( defer parentVol.Destroy() } + err = checkValidCreateVolumeRequest(volOptions, parentVol, pvID, sID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr) if err != nil { if cerrors.IsCloneRetryError(err) { @@ -233,7 +268,8 @@ func (cs *ControllerServer) CreateVolume( } } - volumeContext := req.GetParameters() + // remove kubernetes csi prefixed parameters. + volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters()) volumeContext["subvolumeName"] = vID.FsSubvolName volumeContext["subvolumePath"] = volOptions.RootPath volume := &csi.Volume{ @@ -306,7 +342,8 @@ func (cs *ControllerServer) CreateVolume( log.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s", vID.FsSubvolName, requestName) - volumeContext := req.GetParameters() + // remove kubernetes csi prefixed parameters. + volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters()) volumeContext["subvolumeName"] = vID.FsSubvolName volumeContext["subvolumePath"] = volOptions.RootPath volume := &csi.Volume{ @@ -786,10 +823,10 @@ func (cs *ControllerServer) DeleteSnapshot( // success as deletion is complete return &csi.DeleteSnapshotResponse{}, nil case errors.Is(err, cerrors.ErrSnapNotFound): - err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr) + err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) if err != nil { log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", - sid.FsSubvolName, sid.FsSnapshotName, err) + sid.RequestName, sid.FsSnapshotName, err) return nil, status.Error(codes.Internal, err.Error()) } @@ -799,10 +836,10 @@ func (cs *ControllerServer) DeleteSnapshot( // if the error is ErrVolumeNotFound, the subvolume is already deleted // from backend, Hence undo the omap entries and return success log.ErrorLog(ctx, "Volume not present") - err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr) + err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) if err != nil { log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", - sid.FsSubvolName, sid.FsSnapshotName, err) + sid.RequestName, sid.FsSnapshotName, err) return nil, status.Error(codes.Internal, err.Error()) } @@ -837,7 +874,7 @@ func (cs *ControllerServer) DeleteSnapshot( if err != nil { return nil, status.Error(codes.Internal, err.Error()) } - err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr) + err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) if err != nil { log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", sid.RequestName, sid.FsSnapshotName, err) diff --git a/internal/cephfs/core/volume.go b/internal/cephfs/core/volume.go index d01b5c143..35cd4c809 100644 --- a/internal/cephfs/core/volume.go +++ b/internal/cephfs/core/volume.go @@ -237,7 +237,6 @@ func (s *subVolumeClient) CreateVolume(ctx context.Context) error { opts.PoolLayout = s.Pool } - fmt.Println("this is for debugging ") // FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID) err = ca.CreateSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, &opts) if err != nil { diff --git 
a/internal/cephfs/fuserecovery.go b/internal/cephfs/fuserecovery.go new file mode 100644 index 000000000..32018a328 --- /dev/null +++ b/internal/cephfs/fuserecovery.go @@ -0,0 +1,273 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cephfs + +import ( + "context" + + "github.com/ceph/ceph-csi/internal/cephfs/mounter" + "github.com/ceph/ceph-csi/internal/cephfs/store" + fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" + "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/log" + + mountutil "k8s.io/mount-utils" +) + +type ( + mountState int +) + +const ( + msUnknown mountState = iota + msNotMounted + msMounted + msCorrupted + + // ceph-fuse fsType in /proc/<PID>/mountinfo. + cephFuseFsType = "fuse.ceph-fuse" +) + +func (ms mountState) String() string { + return [...]string{ + "UNKNOWN", + "NOT_MOUNTED", + "MOUNTED", + "CORRUPTED", + }[int(ms)] +} + +func getMountState(path string) (mountState, error) { + isMnt, err := util.IsMountPoint(path) + if err != nil { + if util.IsCorruptedMountError(err) { + return msCorrupted, nil + } + + return msUnknown, err + } + + if isMnt { + return msMounted, nil + } + + return msNotMounted, nil +} + +func findMountinfo(mountpoint string, mis []mountutil.MountInfo) int { + for i := range mis { + if mis[i].MountPoint == mountpoint { + return i + } + } + + return -1 +} + +// Ensures that given mountpoint is of specified fstype. +// Returns true if fstype matches, or if no such mountpoint exists. +func validateFsType(mountpoint, fsType string, mis []mountutil.MountInfo) bool { + if idx := findMountinfo(mountpoint, mis); idx >= 0 { + mi := mis[idx] + + if mi.FsType != fsType { + return false + } + } + + return true +} + +// tryRestoreFuseMountsInNodePublish tries to restore staging and publish +// volume mountpoints inside the NodePublishVolume call. +// +// Restoration is performed in the following steps: +// 1. Detection: staging target path must be a working mountpoint, and target +// path must not be a corrupted mountpoint (see getMountState()). If either +// of those checks fails, mount recovery is performed. +// 2. Recovery preconditions: +// * NodeStageMountinfo is present for this volume, +// * if staging target path and target path are mountpoints, they must be +// managed by ceph-fuse, +// * VolumeOptions.Mounter must evaluate to "fuse". +// 3. Recovery: +// * staging target path is unmounted and mounted again using ceph-fuse, +// * target path is only unmounted; NodePublishVolume is then expected to +// continue normally. +func (ns *NodeServer) tryRestoreFuseMountsInNodePublish( + ctx context.Context, + volID fsutil.VolumeID, + stagingTargetPath string, + targetPath string, + volContext map[string]string, +) error { + // Check if there is anything to restore.
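+ // A rough summary of the detection logic below (derived from the code, + // not an exhaustive state table): staging path MOUNTED with target path + // not CORRUPTED means there is nothing to restore; any other combination + // makes this function attempt mount recovery.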
+ + stagingTargetMs, err := getMountState(stagingTargetPath) + if err != nil { + return err + } + + targetMs, err := getMountState(targetPath) + if err != nil { + return err + } + + if stagingTargetMs == msMounted && targetMs != msCorrupted { + // Mounts seem to be fine. + return nil + } + + // Something is broken. Try to proceed with mount recovery. + + log.WarningLog(ctx, "cephfs: mount problem detected when publishing a volume: %s is %s, %s is %s; attempting recovery", + stagingTargetPath, stagingTargetMs, targetPath, targetMs) + + // NodeStageMountinfo entry must be present for this volume. + + var nsMountinfo *fsutil.NodeStageMountinfo + + if nsMountinfo, err = fsutil.GetNodeStageMountinfo(volID); err != nil { + return err + } else if nsMountinfo == nil { + log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery because NodeStageMountinfo record is missing") + + return nil + } + + // Check that the existing stage and publish mounts for this volume are + // managed by ceph-fuse, and that the mounter is of the FuseMounter type. + // Then try to restore them. + + var ( + volMounter mounter.VolumeMounter + volOptions *store.VolumeOptions + ) + + procMountInfo, err := util.ReadMountInfoForProc("self") + if err != nil { + return err + } + + if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) || + !validateFsType(targetPath, cephFuseFsType, procMountInfo) { + // We can't restore mounts not managed by ceph-fuse. + log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints") + + return nil + } + + volOptions, err = ns.getVolumeOptions(ctx, volID, volContext, nsMountinfo.Secrets) + if err != nil { + return err + } + + volMounter, err = mounter.New(volOptions) + if err != nil { + return err + } + + if _, ok := volMounter.(*mounter.FuseMounter); !ok { + // We can't restore mounts with non-FUSE mounter. + log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter") + + return nil + } + + // Try to restore mount in staging target path. + // Unmount and mount the volume. + + if stagingTargetMs != msMounted { + if err := mounter.UnmountVolume(ctx, stagingTargetPath); err != nil { + return err + } + + if err := ns.mount( + ctx, + volMounter, + volOptions, + volID, + stagingTargetPath, + nsMountinfo.Secrets, + nsMountinfo.VolumeCapability, + ); err != nil { + return err + } + } + + // Try to restore mount in target path. + // Only unmount the bind mount. NodePublishVolume should then + // create the bind mount by itself. + + if err := mounter.UnmountVolume(ctx, targetPath); err != nil { + return err + } + + return nil +} + +// Try to restore FUSE mount of the staging target path in NodeStageVolume. +// If corruption is detected, try to only unmount the volume. NodeStageVolume +// should be able to continue with mounting the volume normally afterwards. +func (ns *NodeServer) tryRestoreFuseMountInNodeStage( + ctx context.Context, + mnt mounter.VolumeMounter, + stagingTargetPath string, +) error { + // Check if there is anything to restore. + + stagingTargetMs, err := getMountState(stagingTargetPath) + if err != nil { + return err + } + + if stagingTargetMs != msCorrupted { + // Mounts seem to be fine. + return nil + } + + // Something is broken. Try to proceed with mount recovery. 
+ + log.WarningLog(ctx, "cephfs: mountpoint problem detected when staging a volume: %s is %s; attempting recovery", + stagingTargetPath, stagingTargetMs) + + // Check that the existing stage mount for this volume is managed by + // ceph-fuse, and that the mounter is of the FuseMounter type. Then try + // to restore it. + + procMountInfo, err := util.ReadMountInfoForProc("self") + if err != nil { + return err + } + + if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) { + // We can't restore mounts not managed by ceph-fuse. + log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints") + + return nil + } + + if _, ok := mnt.(*mounter.FuseMounter); !ok { + // We can't restore mounts with non-FUSE mounter. + log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter") + + return nil + } + + // Restoration here means only unmounting the volume. + // NodeStageVolume should take care of the rest. + return mounter.UnmountVolume(ctx, stagingTargetPath) +} diff --git a/internal/cephfs/nodeserver.go b/internal/cephfs/nodeserver.go index 573a0e543..21457ff38 100644 --- a/internal/cephfs/nodeserver.go +++ b/internal/cephfs/nodeserver.go @@ -47,11 +47,10 @@ type NodeServer struct { func getCredentialsForVolume( volOptions *store.VolumeOptions, - req *csi.NodeStageVolumeRequest) (*util.Credentials, error) { + secrets map[string]string) (*util.Credentials, error) { var ( - err error - cr *util.Credentials - secrets = req.GetSecrets() + err error + cr *util.Credentials ) if volOptions.ProvisionVolume { @@ -64,7 +63,7 @@ func getCredentialsForVolume( } else { // The volume is pre-made, credentials are in node stage secrets - cr, err = util.NewUserCredentials(req.GetSecrets()) + cr, err = util.NewUserCredentials(secrets) if err != nil { return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %w", err) } @@ -73,11 +72,38 @@ func getCredentialsForVolume( return cr, nil } +func (ns *NodeServer) getVolumeOptions( + ctx context.Context, + volID fsutil.VolumeID, + volContext, + volSecrets map[string]string, +) (*store.VolumeOptions, error) { + volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), volContext, volSecrets) + if err != nil { + if !errors.Is(err, cerrors.ErrInvalidVolID) { + return nil, status.Error(codes.Internal, err.Error()) + } + + volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), volContext) + if err != nil { + if !errors.Is(err, cerrors.ErrNonStaticVolume) { + return nil, status.Error(codes.Internal, err.Error()) + } + + volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), volContext, volSecrets) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + + return volOptions, nil +} + // NodeStageVolume mounts the volume to a staging path on the node.
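+// The rough sequence, matching the implementation below: validate the +// request, take the per-volume lock, resolve the volume options, create the +// mounter, attempt FUSE mount recovery for a possibly corrupted staging +// path, mount if needed, and finally persist NodeStageMountinfo for +// ceph-fuse mounts so that a later recovery is possible.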
func (ns *NodeServer) NodeStageVolume( ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { - var volOptions *store.VolumeOptions if err := util.ValidateNodeStageVolumeRequest(req); err != nil { return nil, err } @@ -94,31 +120,25 @@ func (ns *NodeServer) NodeStageVolume( } defer ns.VolumeLocks.Release(req.GetVolumeId()) - volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets()) + volOptions, err := ns.getVolumeOptions(ctx, volID, req.GetVolumeContext(), req.GetSecrets()) if err != nil { - if !errors.Is(err, cerrors.ErrInvalidVolID) { - return nil, status.Error(codes.Internal, err.Error()) - } - - // gets mon IPs from the supplied cluster info - volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext()) - if err != nil { - if !errors.Is(err, cerrors.ErrNonStaticVolume) { - return nil, status.Error(codes.Internal, err.Error()) - } - - // get mon IPs from the volume context - volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(), - req.GetSecrets()) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - } + return nil, status.Error(codes.Internal, err.Error()) } defer volOptions.Destroy() + mnt, err := mounter.New(volOptions) + if err != nil { + log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err) + + return nil, status.Error(codes.Internal, err.Error()) + } + // Check if the volume is already mounted + if err = ns.tryRestoreFuseMountInNodeStage(ctx, mnt, stagingTargetPath); err != nil { + return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err) + } + isMnt, err := util.IsMountPoint(stagingTargetPath) if err != nil { log.ErrorLog(ctx, "stat failed: %v", err) @@ -133,20 +153,53 @@ func (ns *NodeServer) NodeStageVolume( } // It's not, mount now - if err = ns.mount(ctx, volOptions, req); err != nil { + + if err = ns.mount( + ctx, + mnt, + volOptions, + fsutil.VolumeID(req.GetVolumeId()), + req.GetStagingTargetPath(), + req.GetSecrets(), + req.GetVolumeCapability(), + ); err != nil { return nil, err } log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath) + if _, isFuse := mnt.(*mounter.FuseMounter); isFuse { + // FUSE mount recovery needs NodeStageMountinfo records. + + if err = fsutil.WriteNodeStageMountinfo(volID, &fsutil.NodeStageMountinfo{ + VolumeCapability: req.GetVolumeCapability(), + Secrets: req.GetSecrets(), + }); err != nil { + log.ErrorLog(ctx, "cephfs: failed to write NodeStageMountinfo for volume %s: %v", volID, err) + + // Try to clean node stage mount. 
+ if unmountErr := mounter.UnmountVolume(ctx, stagingTargetPath); unmountErr != nil { + log.ErrorLog(ctx, "cephfs: failed to unmount %s in WriteNodeStageMountinfo clean up: %v", + stagingTargetPath, unmountErr) + } + + return nil, status.Error(codes.Internal, err.Error()) + } + } + return &csi.NodeStageVolumeResponse{}, nil } -func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, req *csi.NodeStageVolumeRequest) error { - stagingTargetPath := req.GetStagingTargetPath() - volID := fsutil.VolumeID(req.GetVolumeId()) - - cr, err := getCredentialsForVolume(volOptions, req) +func (*NodeServer) mount( + ctx context.Context, + mnt mounter.VolumeMounter, + volOptions *store.VolumeOptions, + volID fsutil.VolumeID, + stagingTargetPath string, + secrets map[string]string, + volCap *csi.VolumeCapability, +) error { + cr, err := getCredentialsForVolume(volOptions, secrets) if err != nil { log.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err) @@ -154,20 +207,13 @@ func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, r } defer cr.DeleteCredentials() - m, err := mounter.New(volOptions) - if err != nil { - log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err) - - return status.Error(codes.Internal, err.Error()) - } - - log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.Name()) + log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name()) readOnly := "ro" - if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || - req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY { - switch m.(type) { + if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || + volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY { + switch mnt.(type) { case *mounter.FuseMounter: if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) { volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly) @@ -179,7 +225,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, r } } - if err = m.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil { + if err = mnt.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil { log.ErrorLog(ctx, "failed to mount volume %s: %v Check dmesg logs if required.", volID, @@ -201,8 +247,9 @@ func (ns *NodeServer) NodePublishVolume( return nil, err } + stagingTargetPath := req.GetStagingTargetPath() targetPath := req.GetTargetPath() - volID := req.GetVolumeId() + volID := fsutil.VolumeID(req.GetVolumeId()) // Considering kubelet make sure the stage and publish operations // are serialized, we dont need any extra locking in nodePublish @@ -213,12 +260,34 @@ func (ns *NodeServer) NodePublishVolume( return nil, status.Error(codes.Internal, err.Error()) } + if err := ns.tryRestoreFuseMountsInNodePublish( + ctx, + volID, + stagingTargetPath, + targetPath, + req.GetVolumeContext(), + ); err != nil { + return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err) + } + if req.GetReadonly() { mountOptions = append(mountOptions, "ro") } mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability()) + // Ensure staging target path is a mountpoint. 
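+ // If it is not mounted (for example, when the FUSE recovery above could + // not restore it), bind-mounting it into the target path would publish an + // empty directory, so fail early here instead.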
+ + if isMnt, err := util.IsMountPoint(stagingTargetPath); err != nil { + log.ErrorLog(ctx, "stat failed: %v", err) + + return nil, status.Error(codes.Internal, err.Error()) + } else if !isMnt { + return nil, status.Errorf( + codes.Internal, "staging path %s for volume %s is not a mountpoint", stagingTargetPath, volID, + ) + } + // Check if the volume is already mounted isMnt, err := util.IsMountPoint(targetPath) @@ -238,8 +307,8 @@ func (ns *NodeServer) NodePublishVolume( if err = mounter.BindMount( ctx, - req.GetStagingTargetPath(), - req.GetTargetPath(), + stagingTargetPath, + targetPath, req.GetReadonly(), mountOptions); err != nil { log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err) @@ -260,11 +329,14 @@ func (ns *NodeServer) NodeUnpublishVolume( if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil { return nil, err } + // considering kubelet make sure node operations like unpublish/unstage...etc can not be called // at same time, an explicit locking at time of nodeunpublish is not required. targetPath := req.GetTargetPath() isMnt, err := util.IsMountPoint(targetPath) if err != nil { + log.ErrorLog(ctx, "stat failed: %v", err) + if os.IsNotExist(err) { // targetPath has already been deleted log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath) @@ -272,7 +344,14 @@ func (ns *NodeServer) NodeUnpublishVolume( return &csi.NodeUnpublishVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, err.Error()) + if !util.IsCorruptedMountError(err) { + return nil, status.Error(codes.Internal, err.Error()) + } + + // Corrupted mounts need to be unmounted properly too, + // regardless of the mounter used. Continue as normal. + log.DebugLog(ctx, "cephfs: detected corrupted mount in publish target path %s, trying to unmount anyway", targetPath) + isMnt = true } if !isMnt { if err = os.RemoveAll(targetPath); err != nil { @@ -316,8 +395,16 @@ func (ns *NodeServer) NodeUnstageVolume( stagingTargetPath := req.GetStagingTargetPath() + if err = fsutil.RemoveNodeStageMountinfo(fsutil.VolumeID(volID)); err != nil { + log.ErrorLog(ctx, "cephfs: failed to remove NodeStageMountinfo for volume %s: %v", volID, err) + + return nil, status.Error(codes.Internal, err.Error()) + } + isMnt, err := util.IsMountPoint(stagingTargetPath) if err != nil { + log.ErrorLog(ctx, "stat failed: %v", err) + if os.IsNotExist(err) { // targetPath has already been deleted log.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath) @@ -325,7 +412,16 @@ func (ns *NodeServer) NodeUnstageVolume( return &csi.NodeUnstageVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, err.Error()) + if !util.IsCorruptedMountError(err) { + return nil, status.Error(codes.Internal, err.Error()) + } + + // Corrupted mounts need to be unmounted properly too, + // regardless of the mounter used. Continue as normal. + log.DebugLog(ctx, + "cephfs: detected corrupted mount in staging target path %s, trying to unmount anyway", + stagingTargetPath) + isMnt = true } if !isMnt { return &csi.NodeUnstageVolumeResponse{}, nil diff --git a/internal/cephfs/util/mountinfo.go b/internal/cephfs/util/mountinfo.go new file mode 100644 index 000000000..cca1320f6 --- /dev/null +++ b/internal/cephfs/util/mountinfo.go @@ -0,0 +1,149 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/container-storage-interface/spec/lib/go/csi" + // google.golang.org/protobuf/encoding doesn't offer MessageV2(). + "github.com/golang/protobuf/proto" // nolint:staticcheck // See comment above. + "google.golang.org/protobuf/encoding/protojson" +) + +// This file provides functionality to store various mount information +// in a file. It's currently used to restore ceph-fuse mounts. +// Mount info is stored in `/csi/mountinfo`. + +const ( + mountinfoDir = "/csi/mountinfo" +) + +// nodeStageMountinfoRecord describes a single +// record of mountinfo of a staged volume. +// encoding/json-friendly format. +// Only for internal use for marshaling and unmarshaling. +type nodeStageMountinfoRecord struct { + VolumeCapabilityProtoJSON string `json:",omitempty"` + MountOptions []string `json:",omitempty"` + Secrets map[string]string `json:",omitempty"` +} + +// NodeStageMountinfo describes mountinfo of a volume. +type NodeStageMountinfo struct { + VolumeCapability *csi.VolumeCapability + Secrets map[string]string + MountOptions []string +} + +func fmtNodeStageMountinfoFilename(volID VolumeID) string { + return path.Join(mountinfoDir, fmt.Sprintf("nodestage-%s.json", volID)) +} + +func (mi *NodeStageMountinfo) toNodeStageMountinfoRecord() (*nodeStageMountinfoRecord, error) { + bs, err := protojson.Marshal(proto.MessageV2(mi.VolumeCapability)) + if err != nil { + return nil, err + } + + return &nodeStageMountinfoRecord{ + VolumeCapabilityProtoJSON: string(bs), + MountOptions: mi.MountOptions, + Secrets: mi.Secrets, + }, nil +} + +func (r *nodeStageMountinfoRecord) toNodeStageMountinfo() (*NodeStageMountinfo, error) { + volCapability := &csi.VolumeCapability{} + if err := protojson.Unmarshal([]byte(r.VolumeCapabilityProtoJSON), proto.MessageV2(volCapability)); err != nil { + return nil, err + } + + return &NodeStageMountinfo{ + VolumeCapability: volCapability, + MountOptions: r.MountOptions, + Secrets: r.Secrets, + }, nil +} + +// WriteNodeStageMountinfo writes mount info to a file. +func WriteNodeStageMountinfo(volID VolumeID, mi *NodeStageMountinfo) error { + // Write NodeStageMountinfo into JSON-formatted byte slice. + + r, err := mi.toNodeStageMountinfoRecord() + if err != nil { + return err + } + + bs, err := json.Marshal(r) + if err != nil { + return err + } + + // Write the byte slice into a file. + + err = os.WriteFile(fmtNodeStageMountinfoFilename(volID), bs, 0o600) + if os.IsNotExist(err) { + return nil + } + + return err +} + +// GetNodeStageMountinfo tries to retrieve NodeStageMountinfo for `volID`. +// If it doesn't exist, `(nil, nil)` is returned. +func GetNodeStageMountinfo(volID VolumeID) (*NodeStageMountinfo, error) { + // Read the file. + + bs, err := os.ReadFile(fmtNodeStageMountinfoFilename(volID)) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + // Unmarshal the JSON-formatted byte slice into a NodeStageMountinfo struct.
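+ // Purely illustrative (the field contents are assumptions, not captured + // output): a stored record could look roughly like + // {"VolumeCapabilityProtoJSON":"{\"accessMode\":{\"mode\":\"MULTI_NODE_MULTI_WRITER\"}}","Secrets":{"userID":"..."}}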
+ + r := &nodeStageMountinfoRecord{} + if err = json.Unmarshal(bs, r); err != nil { + return nil, err + } + + mi, err := r.toNodeStageMountinfo() + if err != nil { + return nil, err + } + + return mi, err +} + +// RemoveNodeStageMountinfo tries to remove NodeStageMountinfo for `volID`. +// If no such record exists for `volID`, it's considered success too. +func RemoveNodeStageMountinfo(volID VolumeID) error { + if err := os.Remove(fmtNodeStageMountinfoFilename(volID)); err != nil { + if !os.IsNotExist(err) { + return err + } + } + + return nil +} diff --git a/internal/controller/persistentvolume/persistentvolume.go b/internal/controller/persistentvolume/persistentvolume.go index 8c9fa6de4..e6957d88c 100644 --- a/internal/controller/persistentvolume/persistentvolume.go +++ b/internal/controller/persistentvolume/persistentvolume.go @@ -139,6 +139,12 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime. if pv.Spec.CSI == nil || pv.Spec.CSI.Driver != r.config.DriverName { return nil } + // PV is not attached to any PVC + if pv.Spec.ClaimRef == nil { + return nil + } + + pvcNamespace := pv.Spec.ClaimRef.Namespace requestName := pv.Name volumeHandler := pv.Spec.CSI.VolumeHandle secretName := "" @@ -171,7 +177,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime. } defer cr.DeleteCredentials() - rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, cr) + rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, pvcNamespace, cr) if err != nil { log.ErrorLogMsg("failed to regenerate journal %s", err) diff --git a/internal/csi-common/controllerserver-default.go b/internal/csi-common/controllerserver-default.go index 96b005989..3daa31ca7 100644 --- a/internal/csi-common/controllerserver-default.go +++ b/internal/csi-common/controllerserver-default.go @@ -45,13 +45,6 @@ func (cs *DefaultControllerServer) ControllerUnpublishVolume( return nil, status.Error(codes.Unimplemented, "") } -// ControllerExpandVolume expand volume. -func (cs *DefaultControllerServer) ControllerExpandVolume( - ctx context.Context, - req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - // ListVolumes lists volumes. func (cs *DefaultControllerServer) ListVolumes( ctx context.Context, @@ -81,20 +74,6 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities( }, nil } -// CreateSnapshot creates snapshot. -func (cs *DefaultControllerServer) CreateSnapshot( - ctx context.Context, - req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// DeleteSnapshot deletes snapshot. -func (cs *DefaultControllerServer) DeleteSnapshot( - ctx context.Context, - req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - // ListSnapshots lists snapshots. func (cs *DefaultControllerServer) ListSnapshots( ctx context.Context, diff --git a/internal/csi-common/nodeserver-default.go b/internal/csi-common/nodeserver-default.go index fe848bdab..6a725995b 100644 --- a/internal/csi-common/nodeserver-default.go +++ b/internal/csi-common/nodeserver-default.go @@ -32,20 +32,6 @@ type DefaultNodeServer struct { Type string } -// NodeStageVolume returns unimplemented response. 
-func (ns *DefaultNodeServer) NodeStageVolume( - ctx context.Context, - req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// NodeUnstageVolume returns unimplemented response. -func (ns *DefaultNodeServer) NodeUnstageVolume( - ctx context.Context, - req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - // NodeExpandVolume returns unimplemented response. func (ns *DefaultNodeServer) NodeExpandVolume( ctx context.Context, @@ -88,13 +74,6 @@ func (ns *DefaultNodeServer) NodeGetCapabilities( }, nil } -// NodeGetVolumeStats returns volume stats. -func (ns *DefaultNodeServer) NodeGetVolumeStats( - ctx context.Context, - req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - // ConstructMountOptions returns only unique mount options in slice. func ConstructMountOptions(mountOptions []string, volCap *csi.VolumeCapability) []string { if m := volCap.GetMount(); m != nil { diff --git a/internal/journal/voljournal.go b/internal/journal/voljournal.go index fbd02131c..23f5267f7 100644 --- a/internal/journal/voljournal.go +++ b/internal/journal/voljournal.go @@ -742,6 +742,36 @@ func (conn *Connection) StoreImageID(ctx context.Context, pool, reservedUUID, im return nil } +// StoreAttribute stores an attribute (key/value) in omap. +func (conn *Connection) StoreAttribute(ctx context.Context, pool, reservedUUID, attribute, value string) error { + key := conn.config.commonPrefix + attribute + err := setOMapKeys(ctx, conn, pool, conn.config.namespace, conn.config.cephUUIDDirectoryPrefix+reservedUUID, + map[string]string{key: value}) + if err != nil { + return fmt.Errorf("failed to set key %q to %q: %w", key, value, err) + } + + return nil +} + +// FetchAttribute fetches an attribute (key) in omap. +func (conn *Connection) FetchAttribute(ctx context.Context, pool, reservedUUID, attribute string) (string, error) { + key := conn.config.commonPrefix + attribute + values, err := getOMapValues( + ctx, conn, pool, conn.config.namespace, conn.config.cephUUIDDirectoryPrefix+reservedUUID, + conn.config.commonPrefix, []string{key}) + if err != nil { + return "", fmt.Errorf("failed to get values for key %q from OMAP: %w", key, err) + } + + value, ok := values[key] + if !ok { + return "", fmt.Errorf("failed to find key %q in returned map: %v", key, values) + } + + return value, nil +} + // Destroy frees any resources and invalidates the journal connection. func (conn *Connection) Destroy() { // invalidate cluster connection metadata diff --git a/internal/kms/aws_sts_metadata.go b/internal/kms/aws_sts_metadata.go new file mode 100644 index 000000000..a0db764d8 --- /dev/null +++ b/internal/kms/aws_sts_metadata.go @@ -0,0 +1,236 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kms + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "os" + + "github.com/ceph/ceph-csi/internal/util/k8s" + + awsSTS "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go/aws" + awsCreds "github.com/aws/aws-sdk-go/aws/credentials" + awsSession "github.com/aws/aws-sdk-go/aws/session" + awsKMS "github.com/aws/aws-sdk-go/service/kms" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + kmsTypeAWSSTSMetadata = "aws-sts-metadata" + + // awsRoleSessionName is the name of the role session to connect with aws STS. + awsRoleSessionName = "ceph-csi-aws-sts-metadata" + + // awsSTSMetadataDefaultSecretsName is the default name of the Kubernetes Secret + // that contains the credentials to access the Amazon KMS. The name of + // the Secret can be configured by setting the `kmsSecretName` + // option. + // + // #nosec:G101, value not credential, just references token. + awsSTSMetadataDefaultSecretsName = "ceph-csi-aws-credentials" + + // awsSTSSecretNameKey is the key for the secret name in the config map. + awsSTSSecretNameKey = "secretName" + + // The following options are part of the Kubernetes Secrets. + // + // #nosec:G101, value not credential, just configuration keys. + awsSTSRoleARNKey = "awsRoleARN" + awsSTSCMKARNKey = "awsCMKARN" + awsSTSRegionKey = "awsRegion" + + // tokenFilePath is the path to the file containing the OIDC token. + // + // #nosec:G101, value not credential, just path to the token. + tokenFilePath = "/run/secrets/tokens/oidc-token" +) + +var _ = RegisterProvider(Provider{ + UniqueID: kmsTypeAWSSTSMetadata, + Initializer: initAWSSTSMetadataKMS, +}) + +type awsSTSMetadataKMS struct { + awsMetadataKMS + + // AWS STS configuration options + role string +} + +func initAWSSTSMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) { + kms := &awsSTSMetadataKMS{ + awsMetadataKMS: awsMetadataKMS{ + namespace: args.Tenant, + }, + } + + // get secret name if set, else use default. + err := setConfigString(&kms.secretName, args.Config, awsSTSSecretNameKey) + if errors.Is(err, errConfigOptionInvalid) { + return nil, err + } else if errors.Is(err, errConfigOptionMissing) { + kms.secretName = awsSTSMetadataDefaultSecretsName + } + + // read the Kubernetes Secret with the AWS region, role ARN and CMK ARN. + secrets, err := kms.getSecrets() + if err != nil { + return nil, fmt.Errorf("failed to get secrets: %w", err) + } + + var found bool + kms.role, found = secrets[awsSTSRoleARNKey] + if !found { + return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSRoleARNKey) + } + + kms.cmk, found = secrets[awsSTSCMKARNKey] + if !found { + return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSCMKARNKey) + } + + kms.region, found = secrets[awsSTSRegionKey] + if !found { + return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSRegionKey) + } + + return kms, nil +} + +// getSecrets returns required STS configuration options from the Kubernetes Secret.
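+// The Secret referenced by `kmsSecretName` (default +// "ceph-csi-aws-credentials") is expected to carry exactly the three keys +// checked in the switch below, for example (values are placeholders only): +// awsRoleARN: arn:aws:iam::111122223333:role/ceph-csi-kms +// awsCMKARN: arn:aws:kms:us-east-1:111122223333:key/... +// awsRegion: us-east-1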
+func (as *awsSTSMetadataKMS) getSecrets() (map[string]string, error) { + c, err := k8s.NewK8sClient() + if err != nil { + return nil, fmt.Errorf("failed to connect to Kubernetes to "+ + "get Secret %s/%s: %w", as.namespace, as.secretName, err) + } + + secret, err := c.CoreV1().Secrets(as.namespace).Get(context.TODO(), + as.secretName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get Secret %s/%s: %w", + as.namespace, as.secretName, err) + } + + config := make(map[string]string) + for k, v := range secret.Data { + switch k { + case awsSTSRoleARNKey, awsSTSRegionKey, awsSTSCMKARNKey: + config[k] = string(v) + default: + return nil, fmt.Errorf("unsupported option for KMS "+ + "provider %q: %s", kmsTypeAWSSTSMetadata, k) + } + } + + return config, nil +} + +// getWebIdentityToken returns the web identity token from the file. +func (as *awsSTSMetadataKMS) getWebIdentityToken() (string, error) { + buf, err := os.ReadFile(tokenFilePath) + if err != nil { + return "", fmt.Errorf("failed to read oidc token file %q: %w", + tokenFilePath, err) + } + + return string(buf), nil +} + +// getServiceWithSTS returns a new AWS KMS service client, with credentials +// obtained by assuming the configured role through STS. +func (as *awsSTSMetadataKMS) getServiceWithSTS() (*awsKMS.KMS, error) { + webIdentityToken, err := as.getWebIdentityToken() + if err != nil { + return nil, fmt.Errorf("failed to get web identity token: %w", err) + } + + client := awsSTS.New(awsSTS.Options{ + Region: as.region, + }) + output, err := client.AssumeRoleWithWebIdentity(context.TODO(), + &awsSTS.AssumeRoleWithWebIdentityInput{ + RoleArn: aws.String(as.role), + RoleSessionName: aws.String(awsRoleSessionName), + WebIdentityToken: aws.String(webIdentityToken), + }) + if err != nil { + return nil, fmt.Errorf("failed to assume role with web identity token: %w", err) + } + + creds := awsCreds.NewStaticCredentials(*output.Credentials.AccessKeyId, + *output.Credentials.SecretAccessKey, *output.Credentials.SessionToken) + + sess, err := awsSession.NewSessionWithOptions(awsSession.Options{ + SharedConfigState: awsSession.SharedConfigDisable, + Config: aws.Config{ + Credentials: creds, + Region: &as.region, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to create AWS session: %w", err) + } + + return awsKMS.New(sess), nil +} + +// EncryptDEK uses the Amazon KMS and the configured CMK to encrypt the DEK. +func (as *awsSTSMetadataKMS) EncryptDEK(_, plainDEK string) (string, error) { + svc, err := as.getServiceWithSTS() + if err != nil { + return "", fmt.Errorf("failed to get KMS service: %w", err) + } + + result, err := svc.Encrypt(&awsKMS.EncryptInput{ + KeyId: aws.String(as.cmk), + Plaintext: []byte(plainDEK), + }) + if err != nil { + return "", fmt.Errorf("failed to encrypt DEK: %w", err) + } + + // base64 encode the encrypted DEK, so that storing it does not cause + // issues + return base64.StdEncoding.EncodeToString(result.CiphertextBlob), nil +} + +// DecryptDEK uses the Amazon KMS and the configured CMK to decrypt the DEK.
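+// It is the inverse of EncryptDEK above: decrypting the value returned by +// EncryptDEK should yield the original plaintext DEK, since the base64 +// encoding applied after encryption is undone here before calling KMS.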
+func (as *awsSTSMetadataKMS) DecryptDEK(_, encryptedDEK string) (string, error) { + svc, err := as.getServiceWithSTS() + if err != nil { + return "", fmt.Errorf("failed to get KMS service: %w", err) + } + + ciphertextBlob, err := base64.StdEncoding.DecodeString(encryptedDEK) + if err != nil { + return "", fmt.Errorf("failed to decode base64 cipher: %w", + err) + } + + result, err := svc.Decrypt(&awsKMS.DecryptInput{ + CiphertextBlob: ciphertextBlob, + }) + if err != nil { + return "", fmt.Errorf("failed to decrypt DEK: %w", err) + } + + return string(result.Plaintext), nil +} diff --git a/internal/kms/aws_sts_metadata_test.go b/internal/kms/aws_sts_metadata_test.go new file mode 100644 index 000000000..853e1716d --- /dev/null +++ b/internal/kms/aws_sts_metadata_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kms + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAWSSTSMetadataKMSRegistered(t *testing.T) { + t.Parallel() + _, ok := kmsManager.providers[kmsTypeAWSSTSMetadata] + assert.True(t, ok) +} diff --git a/internal/nfs/controller/controllerserver.go b/internal/nfs/controller/controllerserver.go new file mode 100644 index 000000000..c49c95d7a --- /dev/null +++ b/internal/nfs/controller/controllerserver.go @@ -0,0 +1,152 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + + "github.com/ceph/ceph-csi/internal/cephfs" + "github.com/ceph/ceph-csi/internal/cephfs/store" + fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" + csicommon "github.com/ceph/ceph-csi/internal/csi-common" + "github.com/ceph/ceph-csi/internal/journal" + "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/log" + + "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Server struct of CEPH CSI driver with supported methods of CSI controller +// server spec. +type Server struct { + csi.UnimplementedControllerServer + + // backendServer handles the CephFS requests + backendServer *cephfs.ControllerServer +} + +// NewControllerServer initializes a controller server for ceph CSI driver.
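+// A minimal usage sketch (the csicommon.CSIDriver instance "d" is assumed to +// be created elsewhere in the NFS driver setup; it is not part of this file): +// +// cs := NewControllerServer(d) +// // cs is then registered as the ControllerServer of the CSI gRPC server.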
+func NewControllerServer(d *csicommon.CSIDriver) *Server { + // global instance of the volume journal, yuck + store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(cephfs.CSIInstanceID, fsutil.RadosNamespace) + + return &Server{ + backendServer: cephfs.NewControllerServer(d), + } +} + +// ControllerGetCapabilities uses the CephFS backendServer to return the +// capabilities that were set in the Driver.Run() function. +func (cs *Server) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { + return cs.backendServer.ControllerGetCapabilities(ctx, req) +} + +// ValidateVolumeCapabilities checks whether the volume capabilities requested +// are supported. +func (cs *Server) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + return cs.backendServer.ValidateVolumeCapabilities(ctx, req) +} + +// CreateVolume creates the backing subvolume and on any error cleans up any +// created entities. +func (cs *Server) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { + res, err := cs.backendServer.CreateVolume(ctx, req) + if err != nil { + return res, fmt.Errorf("failed to create CephFS volume: %w", err) + } + + backend := res.Volume + + log.DebugLog(ctx, "CephFS volume created: %s", backend.VolumeId) + + secret := req.GetSecrets() + cr, err := util.NewAdminCredentials(secret) + if err != nil { + log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err) + + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + defer cr.DeleteCredentials() + + nfsVolume, err := NewNFSVolume(ctx, backend.VolumeId) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + err = nfsVolume.Connect(cr) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "failed to connect: %v", err) + } + defer nfsVolume.Destroy() + + err = nfsVolume.CreateExport(backend) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "failed to create export: %v", err) + } + + log.DebugLog(ctx, "published NFS-export: %s", nfsVolume) + + // volume has been exported over NFS, set the "share" parameter to + // allow mounting + backend.VolumeContext["share"] = nfsVolume.GetExportPath() + + return &csi.CreateVolumeResponse{Volume: backend}, nil +} + +// DeleteVolume deletes the volume in backend and its reservation. 
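The net effect of this CreateVolume wrapper is visible in the VolumeContext of the provisioned PV: the CephFS parameters are passed through, and the "share" key added above points at the pseudo-path that GetExportPath() derives as "/" plus the volume ID. A sketch with purely illustrative values (the fsName/nfsCluster entries are assumed StorageClass parameters, the volume ID format is abbreviated):

```go
package main

import "fmt"

func main() {
	// hypothetical volume ID; real IDs are generated by the journal
	volumeID := "0001-0009-..."

	volumeContext := map[string]string{
		"fsName":        "myfs",           // assumed StorageClass parameter
		"nfsCluster":    "my-nfs",         // assumed StorageClass parameter
		"subvolumePath": "/volumes/csi/.", // filled in by the CephFS backend (illustrative)
		"share":         "/" + volumeID,   // added by CreateVolume above
	}

	fmt.Println("NFS mount source:", volumeContext["share"])
}
```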
+func (cs *Server) DeleteVolume(
+    ctx context.Context,
+    req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
+    secret := req.GetSecrets()
+    cr, err := util.NewAdminCredentials(secret)
+    if err != nil {
+        log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
+
+        return nil, status.Error(codes.InvalidArgument, err.Error())
+    }
+    defer cr.DeleteCredentials()
+
+    nfsVolume, err := NewNFSVolume(ctx, req.GetVolumeId())
+    if err != nil {
+        return nil, status.Error(codes.InvalidArgument, err.Error())
+    }
+
+    err = nfsVolume.Connect(cr)
+    if err != nil {
+        return nil, status.Errorf(codes.InvalidArgument, "failed to connect: %v", err)
+    }
+    defer nfsVolume.Destroy()
+
+    err = nfsVolume.DeleteExport()
+    // TODO: if the export does not exist, but the backend does, delete the backend
+    if err != nil {
+        return nil, status.Errorf(codes.InvalidArgument, "failed to delete export: %v", err)
+    }
+
+    log.DebugLog(ctx, "deleted NFS-export: %s", nfsVolume)
+
+    return cs.backendServer.DeleteVolume(ctx, req)
+}
diff --git a/internal/nfs/controller/volume.go b/internal/nfs/controller/volume.go
new file mode 100644
index 000000000..c3c2f5bf7
--- /dev/null
+++ b/internal/nfs/controller/volume.go
@@ -0,0 +1,317 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "context"
+    "fmt"
+    "strings"
+
+    fscore "github.com/ceph/ceph-csi/internal/cephfs/core"
+    "github.com/ceph/ceph-csi/internal/cephfs/store"
+    fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
+    "github.com/ceph/ceph-csi/internal/util"
+
+    "github.com/container-storage-interface/spec/lib/go/csi"
+)
+
+const (
+    // clusterNameKey is the key in OMAP that contains the name of the
+    // NFS-cluster. It will be prefixed with the journal configuration.
+    clusterNameKey = "nfs.cluster"
+)
+
+// NFSVolume presents the API for consumption by the CSI-controller to create,
+// modify and delete the NFS-exported CephFS volume. Instances of this struct
+// are short-lived; they only exist as long as a CSI-procedure is active.
+type NFSVolume struct {
+    // ctx is the context for this short-lived volume object
+    ctx context.Context
+
+    volumeID   string
+    clusterID  string
+    mons       string
+    fscID      int64
+    objectUUID string
+
+    // TODO: drop in favor of a go-ceph connection
+    cr        *util.Credentials
+    connected bool
+    conn      *util.ClusterConnection
+}
+
+// NewNFSVolume creates a new NFSVolume instance for the currently executing
+// CSI-procedure.
+func NewNFSVolume(ctx context.Context, volumeID string) (*NFSVolume, error) {
+    vi := util.CSIIdentifier{}
+
+    err := vi.DecomposeCSIID(volumeID)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding volume ID (%s): %w", volumeID, err)
+    }
+
+    return &NFSVolume{
+        ctx:        ctx,
+        volumeID:   volumeID,
+        clusterID:  vi.ClusterID,
+        fscID:      vi.LocationID,
+        objectUUID: vi.ObjectUUID,
+        conn:       &util.ClusterConnection{},
+    }, nil
+}
+
+// String returns a simple/short representation of the NFSVolume.
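Because NFSVolume instances are short-lived, every CSI procedure follows the same construct/connect/destroy sequence. A hedged sketch of the intended call order, as the controller above uses it; this only compiles inside the ceph-csi repository (internal packages), and the function name is invented for illustration:

```go
package controller // sketch, placed alongside the code above

import (
	"context"

	"github.com/ceph/ceph-csi/internal/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// exportVolume (hypothetical helper) shows the NFSVolume lifecycle: build the
// object from the volume ID, Connect() to the cluster with go-ceph, defer
// Destroy() to release the connection, then create the NFS-export.
func exportVolume(ctx context.Context, volumeID string, cr *util.Credentials, backend *csi.Volume) error {
	nfsVolume, err := NewNFSVolume(ctx, volumeID)
	if err != nil {
		return err
	}

	if err = nfsVolume.Connect(cr); err != nil {
		return err
	}
	defer nfsVolume.Destroy()

	return nfsVolume.CreateExport(backend)
}
```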
+func (nv *NFSVolume) String() string {
+    return nv.volumeID
+}
+
+// Connect fetches cluster connection details (like MONs) and connects to the
+// Ceph cluster. This uses go-ceph, so after Connect(), Destroy() should be
+// called to clean up resources.
+func (nv *NFSVolume) Connect(cr *util.Credentials) error {
+    if nv.connected {
+        return nil
+    }
+
+    var err error
+    nv.mons, err = util.Mons(util.CsiConfigFile, nv.clusterID)
+    if err != nil {
+        return fmt.Errorf("failed to get MONs for cluster (%s): %w", nv.clusterID, err)
+    }
+
+    err = nv.conn.Connect(nv.mons, cr)
+    if err != nil {
+        return fmt.Errorf("failed to connect to cluster: %w", err)
+    }
+
+    nv.cr = cr
+    nv.connected = true
+
+    return nil
+}
+
+// Destroy cleans up resources once the NFSVolume instance is not needed
+// anymore.
+func (nv *NFSVolume) Destroy() {
+    if nv.connected {
+        nv.conn.Destroy()
+        nv.connected = false
+    }
+}
+
+// GetExportPath returns the path on the NFS-server that can be used for
+// mounting.
+func (nv *NFSVolume) GetExportPath() string {
+    return "/" + nv.volumeID
+}
+
+// CreateExport takes the (CephFS) CSI-volume and instructs Ceph Mgr to create
+// a new NFS-export for the volume on the Ceph managed NFS-server.
+func (nv *NFSVolume) CreateExport(backend *csi.Volume) error {
+    if !nv.connected {
+        return fmt.Errorf("can not create export for %q: not connected", nv)
+    }
+
+    fs := backend.VolumeContext["fsName"]
+    nfsCluster := backend.VolumeContext["nfsCluster"]
+    path := backend.VolumeContext["subvolumePath"]
+
+    err := nv.setNFSCluster(nfsCluster)
+    if err != nil {
+        return fmt.Errorf("failed to set NFS-cluster: %w", err)
+    }
+
+    // TODO: use new go-ceph API, see ceph/ceph-csi#2977
+    // new versions of Ceph use a different command, and the go-ceph API
+    // also seems to be different :-/
+    //
+    // run the new command, but fall back to the previous one in case of an
+    // error
+    cmds := [][]string{
+        // ceph nfs export create cephfs --cluster-id <cluster_id>
+        //     --pseudo-path <pseudo_path> --fsname <fsname>
+        //     [--readonly] [--path=/path/in/cephfs]
+        nv.createExportCommand("--cluster-id="+nfsCluster,
+            "--fsname="+fs, "--pseudo-path="+nv.GetExportPath(),
+            "--path="+path),
+        // ceph nfs export create cephfs ${FS} ${NFS} /${EXPORT} ${SUBVOL_PATH}
+        nv.createExportCommand(nfsCluster, fs, nv.GetExportPath(), path),
+    }
+
+    stderr, err := nv.retryIfInvalid(cmds)
+    if err != nil {
+        return fmt.Errorf("failed to create export %q in NFS-cluster %q "+
+            "(%v): %s", nv, nfsCluster, err, stderr)
+    }
+
+    return nil
+}
+
+// retryIfInvalid executes the "ceph" command, and falls back to the next cmd
+// in case the error is EINVAL.
+func (nv *NFSVolume) retryIfInvalid(cmds [][]string) (string, error) {
+    var (
+        stderr string
+        err    error
+    )
+    for _, cmd := range cmds {
+        _, stderr, err = util.ExecCommand(nv.ctx, "ceph", cmd...)
+        // in case of an invalid command, fallback to the next one
+        if strings.Contains(stderr, "Error EINVAL: invalid command") {
+            continue
+        }
+
+        // If we get here, either no error, or an unexpected error
+        // happened. There is no need to retry another command.
+        break
+    }
+
+    return stderr, err
+}
+
+// createExportCommand returns the "ceph nfs export create ..." command
+// arguments (without "ceph"). The order of the parameters matches old Ceph
+// releases, new Ceph releases added --option formats, which can be added when
+// passing the parameters to this function.
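The retryIfInvalid fallback is a small but reusable pattern: run command variants in order, advance to the next variant only when Ceph reports "Error EINVAL: invalid command" (the syntax is not understood by this release), and stop on any other outcome. A self-contained sketch with a fake command runner (all names invented):

```go
package main

import (
	"fmt"
	"strings"
)

// runFirstSupported tries each argument variant until one is understood.
func runFirstSupported(run func(args []string) (string, error), variants [][]string) (string, error) {
	var (
		stderr string
		err    error
	)
	for _, args := range variants {
		stderr, err = run(args)
		if strings.Contains(stderr, "Error EINVAL: invalid command") {
			continue // syntax of another Ceph release, try the next variant
		}
		break // success, or a real error: no point in retrying
	}

	return stderr, err
}

func main() {
	// fake runner: pretend only the legacy positional syntax is understood
	fake := func(args []string) (string, error) {
		if strings.HasPrefix(args[0], "--cluster-id=") {
			return "Error EINVAL: invalid command", fmt.Errorf("EINVAL")
		}
		return "", nil
	}

	_, err := runFirstSupported(fake, [][]string{
		{"--cluster-id=my-nfs"},    // new-style arguments
		{"my-nfs", "myfs", "/vol"}, // legacy positional arguments
	})
	fmt.Println("err:", err) // <nil>: the legacy variant succeeded
}
```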
+func (nv *NFSVolume) createExportCommand(nfsCluster, fs, export, path string) []string {
+    return []string{
+        "--id", nv.cr.ID,
+        "--keyfile=" + nv.cr.KeyFile,
+        "-m", nv.mons,
+        "nfs",
+        "export",
+        "create",
+        "cephfs",
+        fs,
+        nfsCluster,
+        export,
+        path,
+    }
+}
+
+// DeleteExport removes the NFS-export from the Ceph managed NFS-server.
+func (nv *NFSVolume) DeleteExport() error {
+    if !nv.connected {
+        return fmt.Errorf("can not delete export for %q: not connected", nv)
+    }
+
+    nfsCluster, err := nv.getNFSCluster()
+    if err != nil {
+        return fmt.Errorf("failed to identify NFS cluster: %w", err)
+    }
+
+    // TODO: use new go-ceph API, see ceph/ceph-csi#2977
+    // new versions of Ceph use a different command, and the go-ceph API
+    // also seems to be different :-/
+    //
+    // run the new command, but fall back to the previous one in case of an
+    // error
+    cmds := [][]string{
+        // ceph nfs export rm <cluster_id> <pseudo_path>
+        nv.deleteExportCommand("rm", nfsCluster),
+        // ceph nfs export delete <cluster_id> <pseudo_path>
+        nv.deleteExportCommand("delete", nfsCluster),
+    }
+
+    stderr, err := nv.retryIfInvalid(cmds)
+    if err != nil {
+        return fmt.Errorf("failed to delete export %q from NFS-cluster "+
+            "%q (%v): %s", nv, nfsCluster, err, stderr)
+    }
+
+    return nil
+}
+
+// deleteExportCommand returns the "ceph nfs export delete ..." command
+// arguments (without "ceph"). Old releases of Ceph expect "delete" as cmd,
+// newer releases use "rm".
+func (nv *NFSVolume) deleteExportCommand(cmd, nfsCluster string) []string {
+    return []string{
+        "--id", nv.cr.ID,
+        "--keyfile=" + nv.cr.KeyFile,
+        "-m", nv.mons,
+        "nfs",
+        "export",
+        cmd,
+        nfsCluster,
+        nv.GetExportPath(),
+    }
+}
+
+// getNFSCluster fetches the NFS-cluster name from the CephFS journal.
+func (nv *NFSVolume) getNFSCluster() (string, error) {
+    if !nv.connected {
+        return "", fmt.Errorf("can not get NFS-cluster for %q: not connected", nv)
+    }
+
+    fs := fscore.NewFileSystem(nv.conn)
+    fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
+    if err != nil {
+        return "", fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
+    }
+
+    mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
+    if err != nil {
+        return "", fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
+    }
+
+    // Connect to cephfs' default radosNamespace (csi)
+    j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
+    if err != nil {
+        return "", fmt.Errorf("failed to connect to journal: %w", err)
+    }
+    defer j.Destroy()
+
+    clusterName, err := j.FetchAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey)
+    if err != nil {
+        return "", fmt.Errorf("failed to get cluster name: %w", err)
+    }
+
+    return clusterName, nil
+}
+
+// setNFSCluster stores the NFS-cluster name in the CephFS journal.
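The delete path mirrors the create path: newer Ceph releases use "rm" where older ones used "delete", and both variants are handed to retryIfInvalid. A trivial sketch that prints the two command shapes deleteExportCommand produces (placeholders stand in for real values, and the --id/--keyfile/-m connection arguments are omitted):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "rm" is tried first (new Ceph), "delete" is the fallback (old Ceph)
	for _, verb := range []string{"rm", "delete"} {
		args := []string{"nfs", "export", verb, "<cluster_id>", "<pseudo_path>"}
		fmt.Println("ceph", strings.Join(args, " "))
	}
}
```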
+func (nv *NFSVolume) setNFSCluster(clusterName string) error {
+    if !nv.connected {
+        return fmt.Errorf("can not set NFS-cluster for %q: not connected", nv)
+    }
+
+    fs := fscore.NewFileSystem(nv.conn)
+    fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
+    if err != nil {
+        return fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
+    }
+
+    mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
+    if err != nil {
+        return fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
+    }
+
+    // Connect to cephfs' default radosNamespace (csi)
+    j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
+    if err != nil {
+        return fmt.Errorf("failed to connect to journal: %w", err)
+    }
+    defer j.Destroy()
+
+    err = j.StoreAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey, clusterName)
+    if err != nil {
+        return fmt.Errorf("failed to store cluster name: %w", err)
+    }
+
+    return nil
+}
diff --git a/internal/nfs/driver/driver.go b/internal/nfs/driver/driver.go
new file mode 100644
index 000000000..6fae5ef10
--- /dev/null
+++ b/internal/nfs/driver/driver.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+    csicommon "github.com/ceph/ceph-csi/internal/csi-common"
+    "github.com/ceph/ceph-csi/internal/nfs/controller"
+    "github.com/ceph/ceph-csi/internal/nfs/identity"
+    "github.com/ceph/ceph-csi/internal/util"
+    "github.com/ceph/ceph-csi/internal/util/log"
+
+    "github.com/container-storage-interface/spec/lib/go/csi"
+)
+
+// Driver contains the default identity and controller struct.
+type Driver struct{}
+
+// NewDriver returns a new ceph driver.
+func NewDriver() *Driver {
+    return &Driver{}
+}
+
+// Run starts a non-blocking gRPC controller, node and identity server for the
+// ceph CSI driver, which can serve multiple parallel requests.
+func (fs *Driver) Run(conf *util.Config) {
+    // Initialize default library driver
+    cd := csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
+    if cd == nil {
+        log.FatalLogMsg("failed to initialize CSI driver")
+    }
+
+    cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
+        csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
+        csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
+    })
+    // VolumeCapabilities are validated by the CephFS Controller
+    cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
+        csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+        csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+        csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
+        csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
+    })
+
+    // Create gRPC servers
+    server := csicommon.NewNonBlockingGRPCServer()
+    srv := csicommon.Servers{
+        IS: identity.NewIdentityServer(cd),
+        CS: controller.NewControllerServer(cd),
+    }
+    server.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
+    if conf.EnableGRPCMetrics {
+        log.WarningLogMsg("EnableGRPCMetrics is deprecated")
+        go util.StartMetricsServer(conf)
+    }
+    if conf.EnableProfiling {
+        if !conf.EnableGRPCMetrics {
+            go util.StartMetricsServer(conf)
+        }
+        log.DebugLogMsg("Registering profiling handler")
+        go util.EnableProfiling()
+    }
+    server.Wait()
+}
diff --git a/internal/nfs/identity/identityserver.go b/internal/nfs/identity/identityserver.go
new file mode 100644
index 000000000..89bb0f508
--- /dev/null
+++ b/internal/nfs/identity/identityserver.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package identity
+
+import (
+    "context"
+
+    csicommon "github.com/ceph/ceph-csi/internal/csi-common"
+
+    "github.com/container-storage-interface/spec/lib/go/csi"
+)
+
+// Server struct of ceph CSI driver with supported methods of CSI identity
+// server spec.
+type Server struct {
+    *csicommon.DefaultIdentityServer
+}
+
+// NewIdentityServer initializes an identity server for the ceph CSI driver.
+func NewIdentityServer(d *csicommon.CSIDriver) *Server {
+    return &Server{
+        DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
+    }
+}
+
+// GetPluginCapabilities returns available capabilities of the ceph driver.
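With the identity and controller servers in place, the remaining piece is selecting this driver at startup. A hedged sketch of how a multi-driver binary might wire it up; this is not the actual cephcsi main.go, and the "nfs" driver-type string and the Config wiring are assumptions for illustration:

```go
package main

import (
	"github.com/ceph/ceph-csi/internal/nfs/driver"
	"github.com/ceph/ceph-csi/internal/util"
)

// runDriver (hypothetical) dispatches on the requested driver type.
func runDriver(driverType string, conf *util.Config) {
	switch driverType {
	case "nfs":
		// starts the identity + controller servers shown above;
		// Run() blocks until the gRPC server is stopped
		driver.NewDriver().Run(conf)
	default:
		// rbd/cephfs drivers elided from this sketch
	}
}

func main() {
	runDriver("nfs", &util.Config{DriverName: "nfs.csi.ceph.com"})
}
```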
+func (is *Server) GetPluginCapabilities( + ctx context.Context, + req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, nil +} diff --git a/internal/rbd/clone.go b/internal/rbd/clone.go index ef82c714a..7788d4add 100644 --- a/internal/rbd/clone.go +++ b/internal/rbd/clone.go @@ -56,23 +56,12 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume) if err != nil { switch { case errors.Is(err, ErrSnapNotFound): - // check temporary image needs flatten, if yes add task to flatten the - // temporary clone - err = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth) - if err != nil { - return false, err - } // as the snapshot is not present, create new snapshot,clone and // delete the temporary snapshot err = createRBDClone(ctx, tempClone, rv, snap) if err != nil { return false, err } - // check image needs flatten, if yes add task to flatten the clone - err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth) - if err != nil { - return false, err - } return true, nil @@ -114,11 +103,6 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume) return false, err } - // check image needs flatten, if yes add task to flatten the clone - err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth) - if err != nil { - return false, err - } return true, nil } @@ -186,10 +170,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol } func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) error { - var ( - errClone error - errFlatten error - ) + var errClone error // generate temp cloned volume tempClone := rv.generateTempClone() @@ -218,62 +199,22 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro } } - if err != nil || errFlatten != nil { - if !errors.Is(errFlatten, ErrFlattenInProgress) { - // cleanup snapshot - cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone) - if cErr != nil { - log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempSnap, tempClone, cErr) - } + if err != nil { + // cleanup snapshot + cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone) + if cErr != nil { + log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempClone, tempSnap, cErr) } } }() - // flatten clone - errFlatten = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth) - if errFlatten != nil { - return errFlatten - } - // create snap of temp clone from temporary cloned image // create final clone // delete snap of temp clone errClone = createRBDClone(ctx, tempClone, rv, cloneSnap) if errClone != nil { - // set errFlatten error to cleanup temporary snapshot and temporary clone - errFlatten = errors.New("failed to create user requested cloned image") - return errClone } return nil } - -func (rv *rbdVolume) flattenCloneImage(ctx context.Context) error { - tempClone := rv.generateTempClone() - // reducing the limit for cloned images to make sure the limit is in range, - // If the intermediate clone reaches the depth we may need to return ABORT - // error message as it need to be flatten before continuing, this may leak - // omap entries and stale temporary snapshots 
in corner cases, if we reduce
-    // the limit and check for the depth of the parent image clain itself we
-    // can flatten the parent images before used to avoid the stale omap entries.
-    hardLimit := rbdHardMaxCloneDepth
-    softLimit := rbdSoftMaxCloneDepth
-    // choosing 2 so that we don't need to flatten the image in the request.
-    const depthToAvoidFlatten = 2
-    if rbdHardMaxCloneDepth > depthToAvoidFlatten {
-        hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
-    }
-    if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
-        softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
-    }
-    err := tempClone.getImageInfo()
-    if err == nil {
-        return tempClone.flattenRbdImage(ctx, false, hardLimit, softLimit)
-    }
-    if !errors.Is(err, ErrImageNotFound) {
-        return err
-    }
-
-    return rv.flattenRbdImage(ctx, false, hardLimit, softLimit)
-}
diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index c6451dd92..1dc179e28 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -22,6 +22,7 @@ import (
     csicommon "github.com/ceph/ceph-csi/internal/csi-common"
     "github.com/ceph/ceph-csi/internal/util"
+    "github.com/ceph/ceph-csi/internal/util/k8s"
     "github.com/ceph/ceph-csi/internal/util/log"

     librbd "github.com/ceph/go-ceph/rbd"
@@ -123,13 +124,27 @@ func (cs *ControllerServer) parseVolCreateRequest(
     rbdVol, err := genVolFromVolumeOptions(
         ctx,
         req.GetParameters(),
-        req.GetSecrets(),
         isMultiWriter && isBlock,
         false)
     if err != nil {
         return nil, status.Error(codes.InvalidArgument, err.Error())
     }

+    // if the KMS is of type VaultToken, additional metadata is needed;
+    // depending on the tenant, the KMS can be configured with other
+    // options
+    // FIXME: this works only on Kubernetes, how do other CO supply metadata?
+    // namespace is derived from the `csi.storage.k8s.io/pvc/namespace`
+    // parameter.
+
+    // get the owner of the PVC, which is required for a few encryption-related operations
+    rbdVol.Owner = k8s.GetOwner(req.GetParameters())
+
+    err = rbdVol.initKMS(ctx, req.GetParameters(), req.GetSecrets())
+    if err != nil {
+        return nil, status.Error(codes.InvalidArgument, err.Error())
+    }
+
     rbdVol.RequestName = req.GetName()

     // Volume Size - Default is 1 GiB
@@ -159,7 +174,8 @@ func (cs *ControllerServer) parseVolCreateRequest(
 }

 func buildCreateVolumeResponse(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) *csi.CreateVolumeResponse {
-    volumeContext := req.GetParameters()
+    // remove kubernetes csi prefixed parameters.
+    volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters())
     volumeContext["pool"] = rbdVol.Pool
     volumeContext["journalPool"] = rbdVol.JournalPool
     volumeContext["imageName"] = rbdVol.RbdImageName
@@ -286,7 +302,7 @@ func (cs *ControllerServer) CreateVolume(
         return nil, err
     }

-    err = flattenParentImage(ctx, parentVol, cr)
+    err = flattenParentImage(ctx, parentVol, rbdSnap, cr)
     if err != nil {
         return nil, err
     }
@@ -297,11 +313,9 @@ func (cs *ControllerServer) CreateVolume(
     }
     defer func() {
         if err != nil {
-            if !errors.Is(err, ErrFlattenInProgress) {
-                errDefer := undoVolReservation(ctx, rbdVol, cr)
-                if errDefer != nil {
-                    log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
-                }
+            errDefer := undoVolReservation(ctx, rbdVol, cr)
+            if errDefer != nil {
+                log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
             }
         }
     }()
@@ -318,12 +332,38 @@ func (cs *ControllerServer) CreateVolume(
     return buildCreateVolumeResponse(req, rbdVol), nil
 }

-func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
+// flattenParentImage is to be called before proceeding with creating a volume
+// with a datasource. This function flattens the parent image accordingly to
+// make sure no flattening is required during or after the new volume creation.
+// For a parent volume, its parent (temp clone or snapshot) is flattened.
+// For a parent snapshot, the snapshot itself is flattened.
+func flattenParentImage(
+    ctx context.Context,
+    rbdVol *rbdVolume,
+    rbdSnap *rbdSnapshot,
+    cr *util.Credentials) error {
+    // flatten the image's parent before the reservation to avoid
+    // stale entries in post creation if we return ABORT error and the
+    // DeleteVolume RPC is not called.
+    // reducing the limit for cloned images to make sure the limit is in range,
+    // If the intermediate clone reaches the depth we may need to return ABORT
+    // error message as it needs to be flattened before continuing; this may leak
+    // omap entries and stale temporary snapshots in corner cases, if we reduce
+    // the limit and check for the depth of the parent image chain itself, we
+    // can flatten the parent images before they are used, to avoid the stale omap entries.
+    hardLimit := rbdHardMaxCloneDepth
+    softLimit := rbdSoftMaxCloneDepth
     if rbdVol != nil {
-        // flatten the image or its parent before the reservation to avoid
-        // stale entries in post creation if we return ABORT error and the
-        // delete volume is not called
-        err := rbdVol.flattenCloneImage(ctx)
+        // choosing 2, since cloning an image creates a temp clone and a final clone which
+        // will add a total depth of 2.
+        const depthToAvoidFlatten = 2
+        if rbdHardMaxCloneDepth > depthToAvoidFlatten {
+            hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
+        }
+        if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
+            softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
+        }
+        err := rbdVol.flattenParent(ctx, hardLimit, softLimit)
         if err != nil {
             return getGRPCErrorForCreateVolume(err)
         }
@@ -335,6 +375,32 @@ func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
             return err
         }
     }
+    if rbdSnap != nil {
+        err := rbdSnap.Connect(cr)
+        if err != nil {
+            return getGRPCErrorForCreateVolume(err)
+        }
+        // in case of any error call Destroy for cleanup.
+        defer func() {
+            if err != nil {
+                rbdSnap.Destroy()
+            }
+        }()
+
+        // choosing 1, since restore from a snapshot adds one depth.
+        const depthToAvoidFlatten = 1
+        if rbdHardMaxCloneDepth > depthToAvoidFlatten {
+            hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
+        }
+        if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
+            softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
+        }
+
+        err = rbdSnap.flattenRbdImage(ctx, false, hardLimit, softLimit)
+        if err != nil {
+            return getGRPCErrorForCreateVolume(err)
+        }
+    }

     return nil
 }
@@ -574,10 +640,8 @@ func (cs *ControllerServer) createBackingImage(

     defer func() {
         if err != nil {
-            if !errors.Is(err, ErrFlattenInProgress) {
-                if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
-                    log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
-                }
+            if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
+                log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
             }
         }
     }()
@@ -586,15 +650,6 @@ func (cs *ControllerServer) createBackingImage(
         return status.Error(codes.Internal, err.Error())
     }

-    if rbdSnap != nil {
-        err = rbdVol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
-        if err != nil {
-            log.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)
-
-            return err
-        }
-    }
-
     return nil
 }
diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go
index 217fe4cfd..2255f1020 100644
--- a/internal/rbd/encryption.go
+++ b/internal/rbd/encryption.go
@@ -265,22 +265,14 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str
 }

 // ParseEncryptionOpts returns kmsID and sets Owner attribute.
-func (ri *rbdImage) ParseEncryptionOpts(ctx context.Context, volOptions map[string]string) (string, error) {
+func (ri *rbdImage) ParseEncryptionOpts(
+    ctx context.Context,
+    volOptions map[string]string) (string, error) {
     var (
         err              error
         ok               bool
         encrypted, kmsID string
     )
-
-    // if the KMS is of type VaultToken, additional metadata is needed
-    // depending on the tenant, the KMS can be configured with other
-    // options
-    // FIXME: this works only on Kubernetes, how do other CO supply metadata?
-    ri.Owner, ok = volOptions["csi.storage.k8s.io/pvc/namespace"]
-    if !ok {
-        log.DebugLog(ctx, "could not detect owner for %s", ri)
-    }
-
     encrypted, ok = volOptions["encrypted"]
     if !ok {
         return "", nil
diff --git a/internal/rbd/mirror.go b/internal/rbd/mirror.go
index 504268a12..08a5e52b8 100644
--- a/internal/rbd/mirror.go
+++ b/internal/rbd/mirror.go
@@ -88,8 +88,8 @@ func (ri *rbdImage) promoteImage(force bool) error {
     return nil
 }

-// forcePromoteImage promotes image to primary with force option with 1 minute
-// timeout. If there is no response within 1 minute,the rbd CLI process will be
+// forcePromoteImage promotes image to primary with force option with 2 minutes
+// timeout. If there is no response within 2 minutes, the rbd CLI process will be
 // killed and an error is returned.
 func (rv *rbdVolume) forcePromoteImage(cr *util.Credentials) error {
     promoteArgs := []string{
@@ -102,7 +102,8 @@ func (rv *rbdVolume) forcePromoteImage(cr *util.Credentials) error {
     }
     _, stderr, err := util.ExecCommandWithTimeout(
         context.TODO(),
-        time.Minute,
+        // 2 minutes timeout as the Replication RPC timeout is 2.5 minutes.
+ 2*time.Minute, "rbd", promoteArgs..., ) diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index affe42fab..143a31ef7 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -147,8 +147,7 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r func populateRbdVol( ctx context.Context, req *csi.NodeStageVolumeRequest, - cr *util.Credentials, - secrets map[string]string) (*rbdVolume, error) { + cr *util.Credentials) (*rbdVolume, error) { var err error var j *journal.Connection volID := req.GetVolumeId() @@ -173,7 +172,7 @@ func populateRbdVol( disableInUseChecks = true } - rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), secrets, disableInUseChecks, true) + rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), disableInUseChecks, true) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -213,6 +212,8 @@ func populateRbdVol( return nil, status.Error(codes.Internal, err.Error()) } rv.RbdImageName = imageAttributes.ImageName + // set owner after extracting the owner name from the journal + rv.Owner = imageAttributes.Owner } err = rv.Connect(cr) @@ -235,6 +236,11 @@ func populateRbdVol( return nil, status.Error(codes.Internal, err.Error()) } + err = rv.initKMS(ctx, req.GetVolumeContext(), req.GetSecrets()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if req.GetVolumeContext()["mounter"] == rbdDefaultMounter && !isKrbdFeatureSupported(ctx, strings.Join(rv.ImageFeatureSet.Names(), ",")) { if !parseBoolOption(ctx, req.GetVolumeContext(), tryOtherMounters, false) { @@ -320,7 +326,7 @@ func (ns *NodeServer) NodeStageVolume( } isStaticVol := parseBoolOption(ctx, req.GetVolumeContext(), staticVol, false) - rv, err := populateRbdVol(ctx, req, cr, req.GetSecrets()) + rv, err := populateRbdVol(ctx, req, cr) if err != nil { return nil, err } diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index 60fce5bbe..7ea8a9466 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -228,21 +228,19 @@ func setRbdNbdToolFeatures() { // returns mounter specific options. func parseMapOptions(mapOptions string) (string, string, error) { var krbdMapOptions, nbdMapOptions string - const ( - noKeyLength = 1 - validLength = 2 - ) for _, item := range strings.Split(mapOptions, ";") { var mounter, options string if item == "" { continue } - s := strings.Split(item, ":") - switch len(s) { - case noKeyLength: + s := strings.SplitN(item, ":", 2) + if len(s) == 1 { options = strings.TrimSpace(s[0]) krbdMapOptions = options - case validLength: + } else { + // options might also contain values delimited with ":", in this + // case mounter type MUST be specified. 
+            // ex: krbd:read_from_replica=localize,crush_location=zone:zone1;
             mounter = strings.TrimSpace(s[0])
             options = strings.TrimSpace(s[1])
             switch strings.ToLower(mounter) {
@@ -251,10 +249,8 @@ func parseMapOptions(mapOptions string) (string, string, error) {
             case accessTypeNbd:
                 nbdMapOptions = options
             default:
-                return "", "", fmt.Errorf("unknown mounter type: %q", mounter)
+                return "", "", fmt.Errorf("unknown mounter type: %q, please specify mounter type", mounter)
             }
-        default:
-            return "", "", fmt.Errorf("badly formatted map/unmap options: %q", mapOptions)
         }
     }
diff --git a/internal/rbd/rbd_attach_test.go b/internal/rbd/rbd_attach_test.go
index 07749530d..7b1c6fa0a 100644
--- a/internal/rbd/rbd_attach_test.go
+++ b/internal/rbd/rbd_attach_test.go
@@ -60,18 +60,25 @@ func TestParseMapOptions(t *testing.T) {
             expectErr:         "",
         },
         {
-            name:              "unknown mounter used",
-            mapOption:         "xyz:xOp1,xOp2",
+            name:              "with `:` delimiter used within the options",
+            mapOption:         "krbd:kOp1,kOp2=kOp21:kOp22;nbd:nOp1,nOp2=nOp21:nOp22",
+            expectKrbdOptions: "kOp1,kOp2=kOp21:kOp22",
+            expectNbdOptions:  "nOp1,nOp2=nOp21:nOp22",
+            expectErr:         "",
+        },
+        {
+            name:              "with `:` delimiter used within the options, without mounter label",
+            mapOption:         "kOp1,kOp2=kOp21:kOp22;nbd:nOp1,nOp2",
             expectKrbdOptions: "",
             expectNbdOptions:  "",
             expectErr:         "unknown mounter type",
         },
         {
-            name:              "bad formatted options",
-            mapOption:         "nbd:nOp1:nOp2;",
+            name:              "unknown mounter used",
+            mapOption:         "xyz:xOp1,xOp2",
             expectKrbdOptions: "",
             expectNbdOptions:  "",
-            expectErr:         "badly formatted map/unmap options",
+            expectErr:         "unknown mounter type",
         },
     }
     for _, tt := range tests {
diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go
index 5f81fafc5..4e2d03c6f 100644
--- a/internal/rbd/rbd_journal.go
+++ b/internal/rbd/rbd_journal.go
@@ -267,7 +267,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
     rv.RbdImageName = imageData.ImageAttributes.ImageName
     rv.ImageID = imageData.ImageAttributes.ImageID
     // check if topology constraints match what is found
-    rv.Topology, err = util.MatchTopologyForPool(rv.TopologyPools, rv.TopologyRequirement,
+    _, _, rv.Topology, err = util.MatchPoolAndTopology(rv.TopologyPools, rv.TopologyRequirement,
         imageData.ImagePool)
     if err != nil {
         // TODO check if need any undo operation here, or ErrVolNameConflict
@@ -303,7 +303,6 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
         return false, err
     }
-    // TODO: check image needs flattening and completed?

     err = rv.repairImageID(ctx, j, false)
     if err != nil {
@@ -413,7 +412,10 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
     var err error
     if rbdSnap != nil {
         // check if topology constraints matches snapshot pool
-        rbdVol.Topology, err = util.MatchTopologyForPool(rbdVol.TopologyPools,
+        var poolName string
+        var dataPoolName string
+
+        poolName, dataPoolName, rbdVol.Topology, err = util.MatchPoolAndTopology(rbdVol.TopologyPools,
             rbdVol.TopologyRequirement, rbdSnap.Pool)
         if err != nil {
             return err
@@ -421,7 +423,8 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {

         // update Pool, if it was topology constrained
         if rbdVol.Topology != nil {
-            rbdVol.Pool = rbdSnap.Pool
+            rbdVol.Pool = poolName
+            rbdVol.DataPool = dataPoolName
         }

         return nil
@@ -532,7 +535,7 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
 // which are not same across clusters.
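The relaxed parsing above hinges on strings.SplitN(item, ":", 2): only the first ":" separates the mounter label from its options, so ":" may appear inside option values such as crush_location=zone:zone1, as long as the label is given. A simplified, self-contained re-implementation of just that rule, for illustration only (the real parseMapOptions also validates the mounter name):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMapOption mimics the SplitN-based rule: no label means krbd.
func splitMapOption(item string) (mounter, options string) {
	s := strings.SplitN(item, ":", 2)
	if len(s) == 1 {
		return "krbd", strings.TrimSpace(s[0]) // unlabelled options default to krbd
	}

	return strings.TrimSpace(s[0]), strings.TrimSpace(s[1])
}

func main() {
	mapOptions := "krbd:read_from_replica=localize,crush_location=zone:zone1;nbd:nOp1,nOp2"
	for _, item := range strings.Split(mapOptions, ";") {
		m, o := splitMapOption(item)
		fmt.Printf("mounter=%s options=%s\n", m, o)
	}
}
```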
 func RegenerateJournal(
     volumeAttributes map[string]string,
-    volumeID, requestName string,
+    volumeID, requestName, owner string,
     cr *util.Credentials) (string, error) {
     ctx := context.Background()
     var (
@@ -552,6 +555,8 @@ func RegenerateJournal(
             ErrInvalidVolID, err, rbdVol.VolID)
     }

+    rbdVol.Owner = owner
+
     kmsID, err = rbdVol.ParseEncryptionOpts(ctx, volumeAttributes)
     if err != nil {
         return "", err
diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go
index 562d22815..3afe68962 100644
--- a/internal/rbd/rbd_util.go
+++ b/internal/rbd/rbd_util.go
@@ -1142,7 +1142,7 @@ func generateVolumeFromMapping(

 func genVolFromVolumeOptions(
     ctx context.Context,
-    volOptions, credentials map[string]string,
+    volOptions map[string]string,
     disableInUseChecks, checkClusterIDMapping bool) (*rbdVolume, error) {
     var (
         ok bool
@@ -1195,11 +1195,6 @@ func genVolFromVolumeOptions(
         rbdVol.Mounter)
     rbdVol.DisableInUseChecks = disableInUseChecks

-    err = rbdVol.initKMS(ctx, volOptions, credentials)
-    if err != nil {
-        return nil, err
-    }
-
     return rbdVol, nil
 }

@@ -1440,6 +1435,47 @@ func (ri *rbdImage) getImageInfo() error {
     return nil
 }

+// getParent returns the parent image if it exists.
+func (ri *rbdImage) getParent() (*rbdImage, error) {
+    err := ri.getImageInfo()
+    if err != nil {
+        return nil, err
+    }
+    if ri.ParentName == "" {
+        return nil, nil
+    }
+
+    parentImage := rbdImage{}
+    parentImage.conn = ri.conn.Copy()
+    parentImage.ClusterID = ri.ClusterID
+    parentImage.Monitors = ri.Monitors
+    parentImage.Pool = ri.ParentPool
+    parentImage.RadosNamespace = ri.RadosNamespace
+    parentImage.RbdImageName = ri.ParentName
+
+    err = parentImage.getImageInfo()
+    if err != nil {
+        return nil, err
+    }
+
+    return &parentImage, nil
+}
+
+// flattenParent flattens the given image's parent, if it exists, according to
+// hard and soft limits.
+func (ri *rbdImage) flattenParent(ctx context.Context, hardLimit, softLimit uint) error {
+    parentImage, err := ri.getParent()
+    if err != nil {
+        return err
+    }
+
+    if parentImage == nil {
+        return nil
+    }
+
+    return parentImage.flattenRbdImage(ctx, false, hardLimit, softLimit)
+}
+
 /*
 checkSnapExists queries rbd about the snapshots of the given image and returns
 ErrImageNotFound if provided image is not found, and ErrSnapNotFound if
diff --git a/internal/util/k8s/parameters.go b/internal/util/k8s/parameters.go
new file mode 100644
index 000000000..5d85182b5
--- /dev/null
+++ b/internal/util/k8s/parameters.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package k8s
+
+import (
+    "strings"
+)
+
+// CSI Parameters prefixed with csiParameterPrefix are passed through
+// to the driver on CreateVolumeRequest/CreateSnapshotRequest calls.
+const (
+    csiParameterPrefix = "csi.storage.k8s.io/"
+    pvcNamespaceKey    = "csi.storage.k8s.io/pvc/namespace"
+)
+
+// RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix.
+func RemoveCSIPrefixedParameters(param map[string]string) map[string]string {
+    newParam := map[string]string{}
+    for k, v := range param {
+        if !strings.HasPrefix(k, csiParameterPrefix) {
+            // add the parameter to the new map if it does not have the prefix
+            newParam[k] = v
+        }
+    }
+
+    return newParam
+}
+
+// GetOwner returns the pvc namespace name from the parameters.
+func GetOwner(param map[string]string) string {
+    return param[pvcNamespaceKey]
+}
diff --git a/internal/util/k8s/parameters_test.go b/internal/util/k8s/parameters_test.go
new file mode 100644
index 000000000..fa959b894
--- /dev/null
+++ b/internal/util/k8s/parameters_test.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package k8s
+
+import (
+    "reflect"
+    "testing"
+)
+
+func TestRemoveCSIPrefixedParameters(t *testing.T) {
+    t.Parallel()
+    tests := []struct {
+        name  string
+        param map[string]string
+        want  map[string]string
+    }{
+        {
+            name: "without csi.storage.k8s.io prefix",
+            param: map[string]string{
+                "foo": "bar",
+            },
+            want: map[string]string{
+                "foo": "bar",
+            },
+        },
+        {
+            name: "with csi.storage.k8s.io prefix",
+            param: map[string]string{
+                "foo":                              "bar",
+                "csi.storage.k8s.io/pvc/name":      "foo",
+                "csi.storage.k8s.io/pvc/namespace": "bar",
+                "csi.storage.k8s.io/pv/name":       "baz",
+            },
+            want: map[string]string{
+                "foo": "bar",
+            },
+        },
+    }
+    for _, tt := range tests {
+        ts := tt
+        t.Run(ts.name, func(t *testing.T) {
+            t.Parallel()
+            got := RemoveCSIPrefixedParameters(ts.param)
+            if !reflect.DeepEqual(got, ts.want) {
+                t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, ts.want)
+            }
+        })
+    }
+}
+
+func TestGetOwner(t *testing.T) {
+    t.Parallel()
+    tests := []struct {
+        name string
+        args map[string]string
+        want string
+    }{
+        {
+            name: "namespace is not present in the parameters",
+            args: map[string]string{
+                "foo": "bar",
+            },
+            want: "",
+        },
+        {
+            name: "namespace is present in the parameters",
+            args: map[string]string{
+                "csi.storage.k8s.io/pvc/namespace": "bar",
+            },
+            want: "bar",
+        },
+    }
+    for _, tt := range tests {
+        ts := tt
+        t.Run(ts.name, func(t *testing.T) {
+            t.Parallel()
+            if got := GetOwner(ts.args); got != ts.want {
+                t.Errorf("GetOwner() = %v, want %v", got, ts.want)
+            }
+        })
+    }
+}
diff --git a/internal/util/reftracker/errors/errors.go b/internal/util/reftracker/errors/errors.go
new file mode 100644
index 000000000..33f1a2740
--- /dev/null
+++ b/internal/util/reftracker/errors/errors.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+    goerrors "errors"
+    "fmt"
+
+    "github.com/ceph/go-ceph/rados"
+    "golang.org/x/sys/unix"
+)
+
+// ErrObjectOutOfDate is an error returned by RADOS read/write ops whose
+// rados_*_op_assert_version failed.
+var ErrObjectOutOfDate = goerrors.New("object is out of date since the last time it was read, try again later")
+
+// UnexpectedReadSize formats an error message for a failure due to bad read
+// size.
+func UnexpectedReadSize(expectedBytes, actualBytes int) error {
+    return fmt.Errorf("unexpected size read: expected %d bytes, got %d",
+        expectedBytes, actualBytes)
+}
+
+// UnknownObjectVersion formats an error message for a failure due to unknown
+// reftracker object version.
+func UnknownObjectVersion(unknownVersion uint32) error {
+    return fmt.Errorf("unknown reftracker version %d", unknownVersion)
+}
+
+// FailedObjectRead formats an error message for a failed RADOS read op.
+func FailedObjectRead(cause error) error {
+    if cause != nil {
+        return fmt.Errorf("failed to read object: %w", TryRADOSAborted(cause))
+    }
+
+    return nil
+}
+
+// FailedObjectWrite formats an error message for a failed RADOS write op.
+func FailedObjectWrite(cause error) error {
+    if cause != nil {
+        return fmt.Errorf("failed to write object: %w", TryRADOSAborted(cause))
+    }
+
+    return nil
+}
+
+// TryRADOSAborted tries to extract a rados_*_op_assert_version failure from opErr.
+func TryRADOSAborted(opErr error) error {
+    if opErr == nil {
+        return nil
+    }
+
+    var radosOpErr rados.OperationError
+    if !goerrors.As(opErr, &radosOpErr) {
+        return opErr
+    }
+
+    // nolint:errorlint // Can't use errors.As() because rados.radosError is private.
+    errnoErr, ok := radosOpErr.OpError.(interface{ ErrorCode() int })
+    if !ok {
+        return opErr
+    }
+
+    errno := errnoErr.ErrorCode()
+    if errno == -int(unix.EOVERFLOW) || errno == -int(unix.ERANGE) {
+        return ErrObjectOutOfDate
+    }
+
+    return nil
+}
diff --git a/internal/util/reftracker/radoswrapper/fakerados.go b/internal/util/reftracker/radoswrapper/fakerados.go
new file mode 100644
index 000000000..c7ff59862
--- /dev/null
+++ b/internal/util/reftracker/radoswrapper/fakerados.go
@@ -0,0 +1,551 @@
+/*
+Copyright 2022 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package radoswrapper + +import ( + "fmt" + + "github.com/ceph/go-ceph/rados" + "golang.org/x/sys/unix" +) + +type ( + FakeObj struct { + Oid string + Ver uint64 + Xattrs map[string][]byte + Omap map[string][]byte + Data []byte + } + + FakeRados struct { + Objs map[string]*FakeObj + } + + FakeIOContext struct { + LastObjVersion uint64 + Rados *FakeRados + } + + FakeWriteOp struct { + IoCtx *FakeIOContext + + steps map[fakeWriteOpStepExecutorIdx]fakeWriteOpStepExecutor + oid string + } + + FakeReadOp struct { + IoCtx *FakeIOContext + + steps map[fakeReadOpStepExecutorIdx]fakeReadOpStepExecutor + oid string + } + + fakeWriteOpStepExecutorIdx int + fakeReadOpStepExecutorIdx int + + fakeWriteOpStepExecutor interface { + operate(w *FakeWriteOp) error + } + + fakeReadOpStepExecutor interface { + operate(r *FakeReadOp) error + } + + fakeRadosError int +) + +const ( + fakeWriteOpAssertVersionExecutorIdx fakeWriteOpStepExecutorIdx = iota + fakeWriteOpRemoveExecutorIdx + fakeWriteOpCreateExecutorIdx + fakeWriteOpSetXattrExecutorIdx + fakeWriteOpWriteFullExecutorIdx + fakeWriteOpRmOmapKeysExecutorIdx + fakeWriteOpSetOmapExecutorIdx + + fakeReadOpAssertVersionExecutorIdx fakeReadOpStepExecutorIdx = iota + fakeReadOpReadExecutorIdx + fakeReadOpGetOmapValuesByKeysExecutorIdx +) + +var ( + _ IOContextW = &FakeIOContext{} + + // fakeWriteOpStepExecutorOrder defines fixed order in which the write ops are performed. + fakeWriteOpStepExecutorOrder = []fakeWriteOpStepExecutorIdx{ + fakeWriteOpAssertVersionExecutorIdx, + fakeWriteOpRemoveExecutorIdx, + fakeWriteOpCreateExecutorIdx, + fakeWriteOpSetXattrExecutorIdx, + fakeWriteOpWriteFullExecutorIdx, + fakeWriteOpRmOmapKeysExecutorIdx, + fakeWriteOpSetOmapExecutorIdx, + } + + // fakeReadOpStepExecutorOrder defines fixed order in which the read ops are performed. 
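The fake executes queued op steps in a fixed order rather than insertion order, which keeps Operate() deterministic. A hedged sketch of how a unit test might drive it, using only the API introduced in this file (the test name is invented; this only compiles inside the ceph-csi repository):

```go
package radoswrapper_test

import (
	"testing"

	"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"

	"github.com/ceph/go-ceph/rados"
)

func TestFakeRadosSketch(t *testing.T) {
	ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())

	// queue Create + SetOmap; Operate() runs Create before SetOmap because
	// of the fixed executor order, regardless of the order used here
	w := ioctx.CreateWriteOp()
	defer w.Release()
	w.SetOmap(map[string][]byte{"ref-a": {1}})
	w.Create(rados.CreateExclusive)
	if err := w.Operate("my-oid"); err != nil {
		t.Fatal(err)
	}

	// read the omap pair back
	r := ioctx.CreateReadOp()
	defer r.Release()
	vals := r.GetOmapValuesByKeys([]string{"ref-a"})
	if err := r.Operate("my-oid"); err != nil {
		t.Fatal(err)
	}
	kv, err := vals.Next()
	if err != nil || kv == nil {
		t.Fatalf("expected ref-a to be present: %v", err)
	}
	t.Logf("read back omap key %q", kv.Key)
}
```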
+ fakeReadOpStepExecutorOrder = []fakeReadOpStepExecutorIdx{ + fakeReadOpAssertVersionExecutorIdx, + fakeReadOpReadExecutorIdx, + fakeReadOpGetOmapValuesByKeysExecutorIdx, + } +) + +func NewFakeRados() *FakeRados { + return &FakeRados{ + Objs: make(map[string]*FakeObj), + } +} + +func NewFakeIOContext(fakeRados *FakeRados) *FakeIOContext { + return &FakeIOContext{ + Rados: fakeRados, + } +} + +func (e fakeRadosError) Error() string { + return fmt.Sprintf("FakeRados errno=%d", int(e)) +} + +func (e fakeRadosError) ErrorCode() int { + return int(e) +} + +func (o *FakeObj) String() string { + return fmt.Sprintf("%s{Ver=%d, Xattrs(%d)=%+v, OMap(%d)=%+v, Data(%d)=%+v}", + o.Oid, o.Ver, len(o.Xattrs), o.Xattrs, len(o.Omap), o.Omap, len(o.Data), o.Data) +} + +func (c *FakeIOContext) GetLastVersion() (uint64, error) { + return c.LastObjVersion, nil +} + +func (c *FakeIOContext) getObj(oid string) (*FakeObj, error) { + obj, ok := c.Rados.Objs[oid] + if !ok { + return nil, rados.ErrNotFound + } + + return obj, nil +} + +func (c *FakeIOContext) GetXattr(oid, key string, data []byte) (int, error) { + obj, ok := c.Rados.Objs[oid] + if !ok { + return 0, rados.ErrNotFound + } + + xattr, ok := obj.Xattrs[key] + if !ok { + return 0, fakeRadosError(-int(unix.ENODATA)) + } + copy(data, xattr) + + return len(xattr), nil +} + +func (c *FakeIOContext) CreateWriteOp() WriteOpW { + return &FakeWriteOp{ + IoCtx: c, + steps: make(map[fakeWriteOpStepExecutorIdx]fakeWriteOpStepExecutor), + } +} + +func (w *FakeWriteOp) Operate(oid string) error { + if len(w.steps) == 0 { + return nil + } + + w.oid = oid + + for _, writeOpExecutorIdx := range fakeWriteOpStepExecutorOrder { + e, ok := w.steps[writeOpExecutorIdx] + if !ok { + continue + } + + if err := e.operate(w); err != nil { + return err + } + } + + if obj, err := w.IoCtx.getObj(oid); err == nil { + obj.Ver++ + w.IoCtx.LastObjVersion = obj.Ver + } + + return nil +} + +func (w *FakeWriteOp) Release() {} + +func (c *FakeIOContext) CreateReadOp() ReadOpW { + return &FakeReadOp{ + IoCtx: c, + steps: make(map[fakeReadOpStepExecutorIdx]fakeReadOpStepExecutor), + } +} + +func (r *FakeReadOp) Operate(oid string) error { + r.oid = oid + + for _, readOpExecutorIdx := range fakeReadOpStepExecutorOrder { + e, ok := r.steps[readOpExecutorIdx] + if !ok { + continue + } + + if err := e.operate(r); err != nil { + return err + } + } + + if obj, err := r.IoCtx.getObj(oid); err == nil { + r.IoCtx.LastObjVersion = obj.Ver + } + + return nil +} + +func (r *FakeReadOp) Release() {} + +// WriteOp Create + +type fakeWriteOpCreateExecutor struct { + exclusive rados.CreateOption +} + +func (e *fakeWriteOpCreateExecutor) operate(w *FakeWriteOp) error { + if e.exclusive == rados.CreateExclusive { + if _, exists := w.IoCtx.Rados.Objs[w.oid]; exists { + return rados.ErrObjectExists + } + } + + w.IoCtx.Rados.Objs[w.oid] = &FakeObj{ + Oid: w.oid, + Omap: make(map[string][]byte), + Xattrs: make(map[string][]byte), + } + + return nil +} + +func (w *FakeWriteOp) Create(exclusive rados.CreateOption) { + w.steps[fakeWriteOpCreateExecutorIdx] = &fakeWriteOpCreateExecutor{ + exclusive: exclusive, + } +} + +// WriteOp Remove + +type fakeWriteOpRemoveExecutor struct{} + +func (e *fakeWriteOpRemoveExecutor) operate(w *FakeWriteOp) error { + if _, err := w.IoCtx.getObj(w.oid); err != nil { + return err + } + + delete(w.IoCtx.Rados.Objs, w.oid) + + return nil +} + +func (w *FakeWriteOp) Remove() { + w.steps[fakeWriteOpRemoveExecutorIdx] = &fakeWriteOpRemoveExecutor{} +} + +// WriteOp SetXattr + +type 
fakeWriteOpSetXattrExecutor struct { + name string + value []byte +} + +func (e *fakeWriteOpSetXattrExecutor) operate(w *FakeWriteOp) error { + obj, err := w.IoCtx.getObj(w.oid) + if err != nil { + return err + } + + obj.Xattrs[e.name] = e.value + + return nil +} + +func (w *FakeWriteOp) SetXattr(name string, value []byte) { + valueCopy := append([]byte(nil), value...) + + w.steps[fakeWriteOpSetXattrExecutorIdx] = &fakeWriteOpSetXattrExecutor{ + name: name, + value: valueCopy, + } +} + +// WriteOp WriteFull + +type fakeWriteOpWriteFullExecutor struct { + data []byte +} + +func (e *fakeWriteOpWriteFullExecutor) operate(w *FakeWriteOp) error { + obj, err := w.IoCtx.getObj(w.oid) + if err != nil { + return err + } + + obj.Data = e.data + + return nil +} + +func (w *FakeWriteOp) WriteFull(b []byte) { + bCopy := append([]byte(nil), b...) + + w.steps[fakeWriteOpWriteFullExecutorIdx] = &fakeWriteOpWriteFullExecutor{ + data: bCopy, + } +} + +// WriteOp SetOmap + +type fakeWriteOpSetOmapExecutor struct { + pairs map[string][]byte +} + +func (e *fakeWriteOpSetOmapExecutor) operate(w *FakeWriteOp) error { + obj, err := w.IoCtx.getObj(w.oid) + if err != nil { + return err + } + + for k, v := range e.pairs { + obj.Omap[k] = v + } + + return nil +} + +func (w *FakeWriteOp) SetOmap(pairs map[string][]byte) { + pairsCopy := make(map[string][]byte, len(pairs)) + for k, v := range pairs { + vCopy := append([]byte(nil), v...) + pairsCopy[k] = vCopy + } + + w.steps[fakeWriteOpSetOmapExecutorIdx] = &fakeWriteOpSetOmapExecutor{ + pairs: pairsCopy, + } +} + +// WriteOp RmOmapKeys + +type fakeWriteOpRmOmapKeysExecutor struct { + keys []string +} + +func (e *fakeWriteOpRmOmapKeysExecutor) operate(w *FakeWriteOp) error { + obj, err := w.IoCtx.getObj(w.oid) + if err != nil { + return err + } + + for _, k := range e.keys { + delete(obj.Omap, k) + } + + return nil +} + +func (w *FakeWriteOp) RmOmapKeys(keys []string) { + keysCopy := append([]string(nil), keys...) + + w.steps[fakeWriteOpRmOmapKeysExecutorIdx] = &fakeWriteOpRmOmapKeysExecutor{ + keys: keysCopy, + } +} + +// WriteOp AssertVersion + +type fakeWriteOpAssertVersionExecutor struct { + version uint64 +} + +func (e *fakeWriteOpAssertVersionExecutor) operate(w *FakeWriteOp) error { + obj, err := w.IoCtx.getObj(w.oid) + if err != nil { + return err + } + + return validateObjVersion(obj.Ver, e.version) +} + +func (w *FakeWriteOp) AssertVersion(v uint64) { + w.steps[fakeWriteOpAssertVersionExecutorIdx] = &fakeWriteOpAssertVersionExecutor{ + version: v, + } +} + +// ReadOp Read + +type fakeReadOpReadExecutor struct { + offset int + buffer []byte + step *rados.ReadOpReadStep +} + +func (e *fakeReadOpReadExecutor) operate(r *FakeReadOp) error { + obj, err := r.IoCtx.getObj(r.oid) + if err != nil { + return err + } + + if e.offset > len(obj.Data) { + // RADOS just returns zero bytes read. 
+ return nil + } + + end := e.offset + len(e.buffer) + if end > len(obj.Data) { + end = len(obj.Data) + } + + nbytes := end - e.offset + e.step.BytesRead = int64(nbytes) + copy(e.buffer, obj.Data[e.offset:]) + + return nil +} + +func (r *FakeReadOp) Read(offset uint64, buffer []byte) *rados.ReadOpReadStep { + s := &rados.ReadOpReadStep{} + r.steps[fakeReadOpReadExecutorIdx] = &fakeReadOpReadExecutor{ + offset: int(offset), + buffer: buffer, + step: s, + } + + return s +} + +// ReadOp GetOmapValuesByKeys + +type ( + fakeReadOpGetOmapValuesByKeysExecutor struct { + keys []string + step *FakeReadOpOmapGetValsByKeysStep + } + + FakeReadOpOmapGetValsByKeysStep struct { + pairs []rados.OmapKeyValue + idx int + canIterate bool + } +) + +func (e *fakeReadOpGetOmapValuesByKeysExecutor) operate(r *FakeReadOp) error { + obj, err := r.IoCtx.getObj(r.oid) + if err != nil { + return err + } + + var pairs []rados.OmapKeyValue + for _, key := range e.keys { + val, ok := obj.Omap[key] + if !ok { + continue + } + + pairs = append(pairs, rados.OmapKeyValue{ + Key: key, + Value: val, + }) + } + + e.step.pairs = pairs + e.step.canIterate = true + + return nil +} + +func (s *FakeReadOpOmapGetValsByKeysStep) Next() (*rados.OmapKeyValue, error) { + if !s.canIterate { + return nil, rados.ErrOperationIncomplete + } + + if s.idx >= len(s.pairs) { + return nil, nil + } + + omapKeyValue := &s.pairs[s.idx] + s.idx++ + + return omapKeyValue, nil +} + +func (r *FakeReadOp) GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW { + keysCopy := append([]string(nil), keys...) + + s := &FakeReadOpOmapGetValsByKeysStep{} + r.steps[fakeReadOpGetOmapValuesByKeysExecutorIdx] = &fakeReadOpGetOmapValuesByKeysExecutor{ + keys: keysCopy, + step: s, + } + + return s +} + +// ReadOp AssertVersion + +type fakeReadOpAssertVersionExecutor struct { + version uint64 +} + +func (e *fakeReadOpAssertVersionExecutor) operate(r *FakeReadOp) error { + obj, err := r.IoCtx.getObj(r.oid) + if err != nil { + return err + } + + return validateObjVersion(obj.Ver, e.version) +} + +func (r *FakeReadOp) AssertVersion(v uint64) { + r.steps[fakeReadOpAssertVersionExecutorIdx] = &fakeReadOpAssertVersionExecutor{ + version: v, + } +} + +func validateObjVersion(expected, actual uint64) error { + // See librados docs for returning error codes in rados_*_op_assert_version: + // https://docs.ceph.com/en/latest/rados/api/librados/?#c.rados_write_op_assert_version + // https://docs.ceph.com/en/latest/rados/api/librados/?#c.rados_read_op_assert_version + + if expected > actual { + return rados.OperationError{ + OpError: fakeRadosError(-int(unix.ERANGE)), + } + } + + if expected < actual { + return rados.OperationError{ + OpError: fakeRadosError(-int(unix.EOVERFLOW)), + } + } + + return nil +} diff --git a/internal/util/reftracker/radoswrapper/interface.go b/internal/util/reftracker/radoswrapper/interface.go new file mode 100644 index 000000000..23e21dc91 --- /dev/null +++ b/internal/util/reftracker/radoswrapper/interface.go @@ -0,0 +1,106 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package radoswrapper + +import ( + "github.com/ceph/go-ceph/rados" +) + +// These interfaces are just wrappers around some of go-ceph's rados pkg +// structures and functions. They have two implementations: the "real" one +// (that simply uses go-ceph), and a fake one, used in unit tests. + +// IOContextW is a wrapper around rados.IOContext. +type IOContextW interface { + // GetLastVersion will return the version number of the last object read or + // written to. + GetLastVersion() (uint64, error) + + // GetXattr reads an xattr with key `key` from object `oid` into `data`. + // It returns the number of bytes read, or an error if not successful. + GetXattr(oid string, key string, data []byte) (int, error) + + // CreateWriteOp returns a newly constructed write operation. + CreateWriteOp() WriteOpW + + // CreateReadOp returns a newly constructed read operation. + CreateReadOp() ReadOpW +} + +// WriteOpW is a wrapper around rados.WriteOp. +type WriteOpW interface { + // Create a rados object. + Create(exclusive rados.CreateOption) + + // Remove object. + Remove() + + // SetXattr sets an xattr. + SetXattr(name string, value []byte) + + // WriteFull writes a given byte slice as the whole object, + // atomically replacing it. + WriteFull(b []byte) + + // SetOmap appends the map `pairs` to the object's omap. + SetOmap(pairs map[string][]byte) + + // RmOmapKeys removes the specified `keys` from the object's omap. + RmOmapKeys(keys []string) + + // AssertVersion ensures that the object exists and that its internal version + // number is equal to "ver" before writing. "ver" should be a version number + // previously obtained with IOContext.GetLastVersion(). + AssertVersion(ver uint64) + + // Operate will perform the operation(s). + Operate(oid string) error + + // Release the resources associated with this write operation. + Release() +} + +// ReadOpW is a wrapper around rados.ReadOp. +type ReadOpW interface { + // Read bytes from offset into buffer. + // len(buffer) is the maximum number of bytes read from the object. + // buffer[:ReadOpReadStep.BytesRead] then contains object data. + Read(offset uint64, buffer []byte) *rados.ReadOpReadStep + + // GetOmapValuesByKeys starts iterating over specific key/value pairs. + GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW + + // AssertVersion ensures that the object exists and that its internal version + // number is equal to "ver" before reading. "ver" should be a version number + // previously obtained with IOContext.GetLastVersion(). + AssertVersion(ver uint64) + + // Operate will perform the operation(s). + Operate(oid string) error + + // Release the resources associated with this read operation. + Release() +} + +// ReadOpOmapGetValsByKeysStepW is a wrapper around rados.ReadOpOmapGetValsByKeysStep. +type ReadOpOmapGetValsByKeysStepW interface { + // Next gets the next omap key/value pair referenced by + // ReadOpOmapGetValsByKeysStep's internal iterator. + // If there are no more elements to retrieve, (nil, nil) is returned. + // May be called only after Operate() finished. + Next() (*rados.OmapKeyValue, error) +} diff --git a/internal/util/reftracker/radoswrapper/radoswrapper.go b/internal/util/reftracker/radoswrapper/radoswrapper.go new file mode 100644 index 000000000..133ccfa1c --- /dev/null +++ b/internal/util/reftracker/radoswrapper/radoswrapper.go @@ -0,0 +1,133 @@ +/* +Copyright 2022 The Ceph-CSI Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package radoswrapper + +import ( + "github.com/ceph/go-ceph/rados" +) + +type ( + IOContext struct { + *rados.IOContext + } + + WriteOp struct { + IoCtx *rados.IOContext + *rados.WriteOp + } + + ReadOp struct { + IoCtx *rados.IOContext + *rados.ReadOp + } + + ReadOpOmapGetValsByKeysStep struct { + *rados.ReadOpOmapGetValsByKeysStep + } +) + +var _ IOContextW = &IOContext{} + +func NewIOContext(ioctx *rados.IOContext) IOContextW { + return &IOContext{ + IOContext: ioctx, + } +} + +func (c *IOContext) GetLastVersion() (uint64, error) { + return c.IOContext.GetLastVersion() +} + +func (c *IOContext) GetXattr(oid, key string, data []byte) (int, error) { + return c.IOContext.GetXattr(oid, key, data) +} + +func (c *IOContext) CreateWriteOp() WriteOpW { + return &WriteOp{ + IoCtx: c.IOContext, + WriteOp: rados.CreateWriteOp(), + } +} + +func (c *IOContext) CreateReadOp() ReadOpW { + return &ReadOp{ + IoCtx: c.IOContext, + ReadOp: rados.CreateReadOp(), + } +} + +func (w *WriteOp) Create(exclusive rados.CreateOption) { + w.WriteOp.Create(exclusive) +} + +func (w *WriteOp) Remove() { + w.WriteOp.Remove() +} + +func (w *WriteOp) SetXattr(name string, value []byte) { + w.WriteOp.SetXattr(name, value) +} + +func (w *WriteOp) WriteFull(b []byte) { + w.WriteOp.WriteFull(b) +} + +func (w *WriteOp) SetOmap(pairs map[string][]byte) { + w.WriteOp.SetOmap(pairs) +} + +func (w *WriteOp) RmOmapKeys(keys []string) { + w.WriteOp.RmOmapKeys(keys) +} + +func (w *WriteOp) AssertVersion(v uint64) { + w.WriteOp.AssertVersion(v) +} + +func (w *WriteOp) Operate(oid string) error { + return w.WriteOp.Operate(w.IoCtx, oid, rados.OperationNoFlag) +} + +func (w *WriteOp) Release() { + w.WriteOp.Release() +} + +func (r *ReadOp) Read(offset uint64, buffer []byte) *rados.ReadOpReadStep { + return r.ReadOp.Read(offset, buffer) +} + +func (r *ReadOp) GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW { + return &ReadOpOmapGetValsByKeysStep{ + ReadOpOmapGetValsByKeysStep: r.ReadOp.GetOmapValuesByKeys(keys), + } +} + +func (r *ReadOp) AssertVersion(v uint64) { + r.ReadOp.AssertVersion(v) +} + +func (r *ReadOp) Operate(oid string) error { + return r.ReadOp.Operate(r.IoCtx, oid, rados.OperationNoFlag) +} + +func (r *ReadOp) Release() { + r.ReadOp.Release() +} + +func (s *ReadOpOmapGetValsByKeysStep) Next() (*rados.OmapKeyValue, error) { + return s.ReadOpOmapGetValsByKeysStep.Next() +} diff --git a/internal/util/reftracker/reftracker.go b/internal/util/reftracker/reftracker.go new file mode 100644 index 000000000..ce1863220 --- /dev/null +++ b/internal/util/reftracker/reftracker.go @@ -0,0 +1,248 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reftracker + +import ( + goerrors "errors" + "fmt" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" + "github.com/ceph/ceph-csi/internal/util/reftracker/reftype" + v1 "github.com/ceph/ceph-csi/internal/util/reftracker/v1" + "github.com/ceph/ceph-csi/internal/util/reftracker/version" + + "github.com/ceph/go-ceph/rados" +) + +// reftracker is a key-based implementation of a reference counter. +// +// Unlike an integer-based counter, reftracker counts references by tracking +// unique keys. This allows accounting in situations where idempotency must be +// preserved. It guarantees there will be no duplicate increments or decrements +// of the counter. +// +// It is stored persistently as a RADOS object, and is safe to use with +// multiple concurrent writers and across different nodes of a cluster. +// +// Example: +// +// created, err := Add( +// ioctx, +// "my-reftracker", +// map[string]struct{}{ +// "ref-key-1": {}, +// "ref-key-2": {}, +// }, +// ) +// +// Since this is a new reftracker object, `created` is `true`. +// +// "my-reftracker" now holds: +// ["ref-key-1":reftype.Normal, "ref-key-2":reftype.Normal] +// The reference count is 2. +// +// created, err := Add( +// ioctx, +// "my-reftracker", +// map[string]struct{}{ +// "ref-key-1": {}, +// "ref-key-2": {}, +// "ref-key-3": {}, +// }, +// ) +// +// The reftracker named "my-reftracker" already exists, so `created` is now +// `false`. Since the "ref-key-1" and "ref-key-2" keys are already tracked, +// only "ref-key-3" is added. +// +// "my-reftracker" now holds: +// ["ref-key-1":reftype.Normal, "ref-key-2":reftype.Normal, +// "ref-key-3":reftype.Normal] +// The reference count is 3. +// +// deleted, err := Remove( +// ioctx, +// "my-reftracker", +// map[string]reftype.RefType{ +// "ref-key-1": reftype.Normal, +// "ref-key-2": reftype.Mask, +// }, +// ) +// +// "my-reftracker" now holds: +// ["ref-key-2":reftype.Mask, "ref-key-3":reftype.Normal] +// The reference count is 1. +// +// Since the reference count is greater than zero, `deleted` is `false`. +// "ref-key-1" was removed, and so is not listed among tracked references. +// "ref-key-2" was only masked, so it's been kept. However, masked references +// don't contribute to the overall reference count, so the resulting refcount +// after this Remove() call is 1. +// +// created, err := Add( +// ioctx, +// "my-reftracker", +// map[string]struct{}{ +// "ref-key-2": {}, +// }, +// ) +// +// "my-reftracker" now holds: +// ["ref-key-2":reftype.Mask, "ref-key-3":reftype.Normal] +// The reference count is 1. +// +// "ref-key-2" is already tracked, so it will not be added again. Since it +// remains masked, it won't contribute to the reference count. +// +// deleted, err := Remove( +// ioctx, +// "my-reftracker", +// map[string]reftype.RefType{ +// "ref-key-3": reftype.Normal, +// }, +// ) +// +// "ref-key-3" was the only tracked key that contributed to the reference count. +// After this Remove() call it's now removed. As a result, the reference count +// dropped down to zero, and the whole object has been deleted too.
+// `deleted` is `true`. + +// Add atomically adds references to `rtName` reference tracker. +// If the reftracker object doesn't exist yet, it is created and `true` is +// returned. If some keys in `refs` map are already tracked by this reftracker +// object, they will not be added again. +func Add( + ioctx radoswrapper.IOContextW, + rtName string, + refs map[string]struct{}, +) (bool, error) { + if err := validateAddInput(rtName, refs); err != nil { + return false, err + } + + // Read reftracker version. + + rtVer, err := version.Read(ioctx, rtName) + if err != nil { + if goerrors.Is(err, rados.ErrNotFound) { + // This is a new reftracker. Initialize it with `refs`. + if err = v1.Init(ioctx, rtName, refs); err != nil { + return false, fmt.Errorf("failed to initialize reftracker: %w", err) + } + + return true, nil + } + + return false, fmt.Errorf("failed to read reftracker version: %w", err) + } + + // Add references to reftracker object. + + gen, err := ioctx.GetLastVersion() + if err != nil { + return false, fmt.Errorf("failed to get RADOS object version: %w", err) + } + + switch rtVer { + case v1.Version: + err = v1.Add(ioctx, rtName, gen, refs) + if err != nil { + err = fmt.Errorf("failed to add refs: %w", err) + } + default: + err = errors.UnknownObjectVersion(rtVer) + } + + return false, err +} + +// Remove atomically removes references from `rtName` reference tracker. +// If the reftracker object holds no references after this removal, the whole +// object is deleted too, and `true` is returned. If the reftracker object +// doesn't exist, (true, nil) is returned. +func Remove( + ioctx radoswrapper.IOContextW, + rtName string, + refs map[string]reftype.RefType, +) (bool, error) { + if err := validateRemoveInput(rtName, refs); err != nil { + return false, err + } + + // Read reftracker version. + + rtVer, err := version.Read(ioctx, rtName) + if err != nil { + if goerrors.Is(err, rados.ErrNotFound) { + // This reftracker doesn't exist. Assume it was already deleted. + return true, nil + } + + return false, fmt.Errorf("failed to read reftracker version: %w", err) + } + + // Remove references from reftracker. + + gen, err := ioctx.GetLastVersion() + if err != nil { + return false, fmt.Errorf("failed to get RADOS object version: %w", err) + } + + var deleted bool + + switch rtVer { + case v1.Version: + deleted, err = v1.Remove(ioctx, rtName, gen, refs) + if err != nil { + err = fmt.Errorf("failed to remove refs: %w", err) + } + default: + err = errors.UnknownObjectVersion(rtVer) + } + + return deleted, err +} + +var ( + errNoRTName = goerrors.New("missing reftracker name") + errNoRefs = goerrors.New("missing refs") +) + +func validateAddInput(rtName string, refs map[string]struct{}) error { + if rtName == "" { + return errNoRTName + } + + if len(refs) == 0 { + return errNoRefs + } + + return nil +} + +func validateRemoveInput(rtName string, refs map[string]reftype.RefType) error { + if rtName == "" { + return errNoRTName + } + + if len(refs) == 0 { + return errNoRefs + } + + return nil +} diff --git a/internal/util/reftracker/reftracker_test.go b/internal/util/reftracker/reftracker_test.go new file mode 100644 index 000000000..58a121d6d --- /dev/null +++ b/internal/util/reftracker/reftracker_test.go @@ -0,0 +1,491 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reftracker + +import ( + "testing" + + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" + "github.com/ceph/ceph-csi/internal/util/reftracker/reftype" + + "github.com/stretchr/testify/assert" +) + +const rtName = "hello-rt" + +func TestRTAdd(t *testing.T) { + t.Parallel() + + // Verify input validation for reftracker name. + t.Run("AddNoName", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + created, err := Add(ioctx, "", nil) + assert.Error(ts, err) + assert.False(ts, created) + }) + + // Verify input validation for nil and empty refs. + t.Run("AddNoRefs", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + refs := []map[string]struct{}{ + nil, + make(map[string]struct{}), + } + for _, ref := range refs { + created, err := Add(ioctx, rtName, ref) + assert.Error(ts, err) + assert.False(ts, created) + } + }) + + // Add multiple refs in a single Add(). + t.Run("AddBulk", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + }) + + // Add refs where each Add() has some of the refs overlapping + // with the previous call. + t.Run("AddOverlapping", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + refsTable := []map[string]struct{}{ + {"ref2": {}, "ref3": {}}, + {"ref3": {}, "ref4": {}}, + {"ref4": {}, "ref5": {}}, + } + for _, refs := range refsTable { + created, err = Add(ioctx, rtName, refs) + assert.NoError(ts, err) + assert.False(ts, created) + } + }) +} + +func TestRTRemove(t *testing.T) { + t.Parallel() + + // Verify input validation for nil and empty refs. + t.Run("RemoveNoRefs", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + refs := []map[string]reftype.RefType{ + nil, + make(map[string]reftype.RefType), + } + for _, ref := range refs { + deleted, err := Remove(ioctx, rtName, ref) + assert.Error(ts, err) + assert.False(ts, deleted) + } + }) + + // Attempting to remove refs from a non-existent reftracker object should + // succeed, with deleted=true, err=nil. + t.Run("RemoveNotExists", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + deleted, err := Remove(ioctx, "xxx", map[string]reftype.RefType{ + "ref1": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Removing only non-existent refs should not result in reftracker object + // deletion.
+ t.Run("RemoveNonExistentRefs", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "refX": reftype.Normal, + "refY": reftype.Normal, + "refZ": reftype.Normal, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + }) + + // Removing all refs plus some surplus should result in reftracker object + // deletion. + t.Run("RemoveNonExistentRefs", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "refX": reftype.Normal, + "refY": reftype.Normal, + "ref": reftype.Normal, + "refZ": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Bulk removal of all refs should result in reftracker object deletion. + t.Run("RemoveBulk", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + keys := []string{"ref1", "ref2", "ref3"} + refsToAdd := make(map[string]struct{}) + refsToRemove := make(map[string]reftype.RefType) + for _, k := range keys { + refsToAdd[k] = struct{}{} + refsToRemove[k] = reftype.Normal + } + + created, err := Add(ioctx, rtName, refsToAdd) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, refsToRemove) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Removal of all refs one-by-one should result in reftracker object deletion + // in the last Remove() call. + t.Run("RemoveSingle", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + for _, k := range []string{"ref3", "ref2"} { + deleted, errRemove := Remove(ioctx, rtName, map[string]reftype.RefType{ + k: reftype.Normal, + }) + assert.NoError(ts, errRemove) + assert.False(ts, deleted) + } + + // Remove the last reference. It should remove the whole reftracker object too. + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Cycle through reftracker object twice. + t.Run("AddRemoveAddRemove", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + refsToAdd := map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + } + refsToRemove := map[string]reftype.RefType{ + "ref1": reftype.Normal, + "ref2": reftype.Normal, + "ref3": reftype.Normal, + } + + for i := 0; i < 2; i++ { + created, err := Add(ioctx, rtName, refsToAdd) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, refsToRemove) + assert.NoError(ts, err) + assert.True(ts, deleted) + } + }) + + // Check for respecting idempotency by making multiple additions with overlapping keys + // and removing only ref keys that were distinct. 
+ t.Run("AddOverlappingRemoveBulk", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + }) + assert.True(ts, created) + assert.NoError(ts, err) + refsTable := []map[string]struct{}{ + {"ref2": {}, "ref3": {}}, + {"ref3": {}, "ref4": {}}, + {"ref4": {}, "ref5": {}}, + } + for _, refs := range refsTable { + created, err = Add(ioctx, rtName, refs) + assert.False(ts, created) + assert.NoError(ts, err) + } + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Normal, + "ref2": reftype.Normal, + "ref3": reftype.Normal, + "ref4": reftype.Normal, + "ref5": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) +} + +func TestRTMask(t *testing.T) { + t.Parallel() + + // Bulk masking all refs should result in reftracker object deletion. + t.Run("MaskAllBulk", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + keys := []string{"ref1", "ref2", "ref3"} + refsToAdd := make(map[string]struct{}) + refsToRemove := make(map[string]reftype.RefType) + for _, k := range keys { + refsToAdd[k] = struct{}{} + refsToRemove[k] = reftype.Mask + } + + created, err := Add(ioctx, rtName, refsToAdd) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, refsToRemove) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Masking all refs one-by-one should result in reftracker object deletion in + // the last Remove() call. + t.Run("RemoveSingle", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + for _, k := range []string{"ref3", "ref2"} { + deleted, errRemove := Remove(ioctx, rtName, map[string]reftype.RefType{ + k: reftype.Mask, + }) + assert.NoError(ts, errRemove) + assert.False(ts, deleted) + } + + // Remove the last reference. It should delete the whole reftracker object + // too. + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Mask, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Bulk removing two (out of 3) refs and then masking the ref that's left + // should result in reftracker object deletion in the last Remove() call. + t.Run("RemoveBulkMaskSingle", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Normal, + "ref2": reftype.Normal, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref3": reftype.Mask, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Bulk masking two (out of 3) refs and then removing the ref that's left + // should result in reftracker object deletion in the last Remove() call. 
+ t.Run("MaskSingleRemoveBulk", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Mask, + "ref2": reftype.Mask, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref3": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Verify that masking refs hides them from future Add()s. + t.Run("MaskAndAdd", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Mask, + "ref2": reftype.Mask, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + created, err = Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + }) + assert.NoError(ts, err) + assert.False(ts, created) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref3": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) + + // Verify that masked refs may be removed with reftype.Normal and re-added. + t.Run("MaskRemoveAdd", func(ts *testing.T) { + ts.Parallel() + + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + + created, err := Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + "ref3": {}, + }) + assert.NoError(ts, err) + assert.True(ts, created) + + deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Mask, + "ref2": reftype.Mask, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Normal, + "ref2": reftype.Normal, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + created, err = Add(ioctx, rtName, map[string]struct{}{ + "ref1": {}, + "ref2": {}, + }) + assert.NoError(ts, err) + assert.False(ts, created) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref3": reftype.Normal, + }) + assert.NoError(ts, err) + assert.False(ts, deleted) + + deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{ + "ref1": reftype.Normal, + "ref2": reftype.Normal, + }) + assert.NoError(ts, err) + assert.True(ts, deleted) + }) +} diff --git a/internal/util/reftracker/reftype/reftype.go b/internal/util/reftracker/reftype/reftype.go new file mode 100644 index 000000000..e2d6d1587 --- /dev/null +++ b/internal/util/reftracker/reftype/reftype.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reftype + +import ( + "fmt" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" +) + +// RefType describes type of the reftracker reference. +type RefType int8 + +const ( + refTypeSize = 1 + + // Unknown reftype used to signal error state. + Unknown RefType = 0 + + // Normal type tags the reference to have normal effect on the reference + // count. Adding Normal reference increments the reference count. Removing + // Normal reference decrements the reference count. + // + // It may be converted to a Mask if it is removed with Mask reftype. + Normal RefType = 1 + + // Mask type tags the reference to be masked, making it not contribute to the + // overall reference count. The reference will be ignored by all future Add() + // calls until it is removed with Normal reftype. + Mask RefType = 2 +) + +func ToBytes(t RefType) []byte { + return []byte{byte(t)} +} + +func FromBytes(bs []byte) (RefType, error) { + if len(bs) != refTypeSize { + return Unknown, errors.UnexpectedReadSize(refTypeSize, len(bs)) + } + + num := RefType(bs[0]) + switch num { // nolint:exhaustive // reftype.Unknown is handled in default case. + case Normal, Mask: + return num, nil + default: + return Unknown, fmt.Errorf("unknown reftype %d", num) + } +} diff --git a/internal/util/reftracker/reftype/reftype_test.go b/internal/util/reftracker/reftype/reftype_test.go new file mode 100644 index 000000000..88d25d3a2 --- /dev/null +++ b/internal/util/reftracker/reftype/reftype_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reftype + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRefTypeBytes(t *testing.T) { + t.Parallel() + + var ( + refTypeNormalBytes = []byte{1} + refTypeMaskBytes = []byte{2} + + expectedBytes = [][]byte{refTypeNormalBytes, refTypeMaskBytes} + refTypes = []RefType{Normal, Mask} + + refTypeInvalidBytes = []byte{0xFF} + refTypeWrongSizeBytes = []byte{0, 0, 0, 0, 1} + ) + + t.Run("ToBytes", func(ts *testing.T) { + ts.Parallel() + + for i := range expectedBytes { + bs := ToBytes(refTypes[i]) + assert.Equal(ts, expectedBytes[i], bs) + } + }) + + t.Run("FromBytes", func(ts *testing.T) { + ts.Parallel() + + for i := range refTypes { + refType, err := FromBytes(expectedBytes[i]) + assert.NoError(ts, err) + assert.Equal(ts, refTypes[i], refType) + } + + _, err := FromBytes(refTypeInvalidBytes) + assert.Error(ts, err) + + _, err = FromBytes(refTypeWrongSizeBytes) + assert.Error(ts, err) + }) +} diff --git a/internal/util/reftracker/v1/refcount.go b/internal/util/reftracker/v1/refcount.go new file mode 100644 index 000000000..9fb5bd1e1 --- /dev/null +++ b/internal/util/reftracker/v1/refcount.go @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/binary" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" +) + +// Represents the number of references a reftracker object holds. +type refCount uint32 + +const ( + Version = 1 + + refCountSize = 4 +) + +func (rc refCount) toBytes() []byte { + bs := make([]byte, refCountSize) + binary.BigEndian.PutUint32(bs, uint32(rc)) + + return bs +} + +func refCountFromBytes(bs []byte) (refCount, error) { + if len(bs) != refCountSize { + return 0, errors.UnexpectedReadSize(refCountSize, len(bs)) + } + + return refCount(binary.BigEndian.Uint32(bs)), nil +} diff --git a/internal/util/reftracker/v1/refcount_test.go b/internal/util/reftracker/v1/refcount_test.go new file mode 100644 index 000000000..b3e7252db --- /dev/null +++ b/internal/util/reftracker/v1/refcount_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestV1RefCountBytes(t *testing.T) { + t.Parallel() + + var ( + refCountBytes = []byte{0x0, 0x0, 0x0, 0x7B} + refCountValue = refCount(123) + wrongSizeRefCountBytes = []byte{0, 0, 1} + ) + + t.Run("ToBytes", func(ts *testing.T) { + ts.Parallel() + + bs := refCountValue.toBytes() + assert.Equal(ts, refCountBytes, bs) + }) + + t.Run("FromBytes", func(ts *testing.T) { + ts.Parallel() + + rc, err := refCountFromBytes(refCountBytes) + assert.NoError(ts, err) + assert.Equal(ts, refCountValue, rc) + + _, err = refCountFromBytes(wrongSizeRefCountBytes) + assert.Error(ts, err) + }) +} diff --git a/internal/util/reftracker/v1/v1.go b/internal/util/reftracker/v1/v1.go new file mode 100644 index 000000000..bebeadbff --- /dev/null +++ b/internal/util/reftracker/v1/v1.go @@ -0,0 +1,314 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + goerrors "errors" + "fmt" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" + "github.com/ceph/ceph-csi/internal/util/reftracker/reftype" + "github.com/ceph/ceph-csi/internal/util/reftracker/version" + + "github.com/ceph/go-ceph/rados" +) + +/* + +Version 1 layout: +----------------- + +If not specified otherwise, all values are stored in big-endian order. + + byte idx type name + -------- ------ ------ + 0 .. 3 uint32 refcount + + `refcount`: Number of references held by the reftracker object. The actual + reference keys are stored in an OMap of the RADOS object. + + OMap entry layout: + + Key: + + reftracker key. + + Value: + + byte idx type name + -------- ------ ------ + 0 int8 type + + `type`: reference type defined in reftracker/reftype. + +*/ + +type readResult struct { + // Total number of references held by the reftracker object. + total refCount + // Refs whose keys matched the request. + foundRefs map[string]reftype.RefType +} + +// Init atomically initializes a new reftracker object. +func Init( + ioctx radoswrapper.IOContextW, + rtName string, + refs map[string]struct{}, +) error { + // Prepare refcount and OMap key-value pairs. + + refsToAddBytes := make(map[string][]byte, len(refs)) + + for ref := range refs { + refsToAddBytes[ref] = reftype.ToBytes(reftype.Normal) + } + + // Perform the write. + + w := ioctx.CreateWriteOp() + defer w.Release() + + w.Create(rados.CreateExclusive) + w.SetXattr(version.XattrName, version.ToBytes(Version)) + w.SetOmap(refsToAddBytes) + w.WriteFull(refCount(len(refsToAddBytes)).toBytes()) + + return errors.FailedObjectWrite(w.Operate(rtName)) +} + +// Add atomically adds refs to an existing reftracker object. +func Add( + ioctx radoswrapper.IOContextW, + rtName string, + gen uint64, + refs map[string]struct{}, +) error { + // Read the reftracker object to figure out which refs to add. + + readRes, err := readObjectByKeys(ioctx, rtName, gen, refsMapToKeysSlice(refs)) + if err != nil { + return errors.FailedObjectRead(err) + } + + // Build list of refs to add. + // Add only refs that are missing in the reftracker object. + + refsToAdd := make(map[string][]byte) + + for ref := range refs { + if _, found := readRes.foundRefs[ref]; !found { + refsToAdd[ref] = reftype.ToBytes(reftype.Normal) + } + } + + if len(refsToAdd) == 0 { + // Nothing to do. + return nil + } + + // Calculate new refcount. + + rcToAdd := refCount(len(refsToAdd)) + newRC := readRes.total + rcToAdd + + if newRC < readRes.total { + return goerrors.New("addition would overflow uint32 refcount") + } + + // Write the data. + + w := ioctx.CreateWriteOp() + defer w.Release() + + w.AssertVersion(gen) + w.WriteFull(newRC.toBytes()) + w.SetOmap(refsToAdd) + + return errors.FailedObjectWrite(w.Operate(rtName)) +} + +// Remove atomically removes refs from the reftracker object. If the object +// wouldn't hold any references after the removal, the whole object is deleted instead. +func Remove( + ioctx radoswrapper.IOContextW, + rtName string, + gen uint64, + refs map[string]reftype.RefType, +) (bool, error) { + // Read the reftracker object to figure out which refs to remove. + + readRes, err := readObjectByKeys(ioctx, rtName, gen, typedRefsMapToKeysSlice(refs)) + if err != nil { + return false, errors.FailedObjectRead(err) + } + + // Build lists of refs to remove, replace, and add.
+ // There are three cases that need to be handled: + // (1) removing reftype.Normal refs, + // (2) converting refs that were reftype.Normal into reftype.Mask, + // (3) adding a new reftype.Mask key. + + var ( + refsToRemove []string + refsToSet = make(map[string][]byte) + rcToSubtract refCount + ) + + for ref, refType := range refs { + if matchedRefType, found := readRes.foundRefs[ref]; found { + if refType == reftype.Normal { + // Case (1): regular removal of Normal ref. + refsToRemove = append(refsToRemove, ref) + if matchedRefType == reftype.Normal { + // If matchedRef was reftype.Mask, it would have already been + // subtracted from the refcount. + rcToSubtract++ + } + } else if refType == reftype.Mask && matchedRefType == reftype.Normal { + // Case (2): convert Normal ref to Mask. + // Since this ref is now reftype.Mask, rcToSubtract needs to be adjusted + // too -- so that this ref is not counted in. + refsToSet[ref] = reftype.ToBytes(reftype.Mask) + rcToSubtract++ + } + } else { + if refType == reftype.Mask { + // Case (3): add a new Mask ref. + // reftype.Mask doesn't contribute refcount so no change to rcToSubtract. + refsToSet[ref] = reftype.ToBytes(reftype.Mask) + } // else: No such ref was found, so there's nothing to remove. + } + } + + if len(refsToRemove) == 0 && len(refsToSet) == 0 { + // Nothing to do. + return false, nil + } + + // Calculate new refcount. + + if rcToSubtract > readRes.total { + // BUG: this should never happen! + return false, fmt.Errorf("refcount underflow, reftracker object corrupted") + } + + newRC := readRes.total - rcToSubtract + // If newRC is zero, it means all refs that the reftracker object held will be + // now gone, and the object must be deleted. + deleted := newRC == 0 + + // Write the data. + + w := ioctx.CreateWriteOp() + defer w.Release() + + w.AssertVersion(gen) + + if deleted { + w.Remove() + } else { + w.WriteFull(newRC.toBytes()) + w.RmOmapKeys(refsToRemove) + w.SetOmap(refsToSet) + } + + if err := w.Operate(rtName); err != nil { + return false, errors.FailedObjectWrite(err) + } + + return deleted, nil +} + +// Tries to find `keys` in reftracker object and returns the result. Failing to +// find any particular key does not result in an error. +func readObjectByKeys( + ioctx radoswrapper.IOContextW, + rtName string, + gen uint64, + keys []string, +) (*readResult, error) { + // Read data from object. + + rcBytes := make([]byte, refCountSize) + + r := ioctx.CreateReadOp() + defer r.Release() + + r.AssertVersion(gen) + r.Read(0, rcBytes) + s := r.GetOmapValuesByKeys(keys) + + if err := r.Operate(rtName); err != nil { + return nil, errors.TryRADOSAborted(err) + } + + // Convert it from byte slices to type-safe values. 
+ + var ( + rc refCount + refs = make(map[string]reftype.RefType) + err error + ) + + rc, err = refCountFromBytes(rcBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse refcount: %w", err) + } + + for { + kvPair, err := s.Next() + if err != nil { + return nil, fmt.Errorf("failed to iterate over OMap: %w", err) + } + + if kvPair == nil { + break + } + + refType, err := reftype.FromBytes(kvPair.Value) + if err != nil { + return nil, fmt.Errorf("failed to parse reftype: %w", err) + } + + refs[kvPair.Key] = refType + } + + return &readResult{ + total: rc, + foundRefs: refs, + }, nil +} + +func refsMapToKeysSlice(m map[string]struct{}) []string { + s := make([]string, 0, len(m)) + for k := range m { + s = append(s, k) + } + + return s +} + +func typedRefsMapToKeysSlice(m map[string]reftype.RefType) []string { + s := make([]string, 0, len(m)) + for k := range m { + s = append(s, k) + } + + return s +} diff --git a/internal/util/reftracker/v1/v1_test.go b/internal/util/reftracker/v1/v1_test.go new file mode 100644 index 000000000..4466ea101 --- /dev/null +++ b/internal/util/reftracker/v1/v1_test.go @@ -0,0 +1,423 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + goerrors "errors" + "testing" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" + "github.com/ceph/ceph-csi/internal/util/reftracker/reftype" + + "github.com/stretchr/testify/assert" +) + +func TestV1Read(t *testing.T) { + t.Parallel() + + const rtName = "hello-rt" + + var ( + gen = uint64(0) + + validObj = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Data: []byte{0, 0, 0, 0}, + Omap: make(map[string][]byte), + }, + }, + }) + + invalidObjs = []*radoswrapper.FakeIOContext{ + // Missing object. + radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()), + // Bad generation number. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Ver: 123, + Oid: rtName, + Data: []byte{0, 0, 0, 0}, + }, + }, + }), + // Refcount overflow. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Data: []byte{0xFF, 0xFF, 0xFF, 0xFF}, + }, + }, + }), + } + + refsToAdd = map[string]struct{}{"ref1": {}} + ) + + err := Add(validObj, rtName, gen, refsToAdd) + assert.NoError(t, err) + + for i := range invalidObjs { + err = Add(invalidObjs[i], rtName, gen, refsToAdd) + assert.Error(t, err) + } + + // Check for correct error type for wrong gen num. 
+ err = Add(invalidObjs[1], rtName, gen, refsToAdd) + assert.Error(t, err) + assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate)) +} + +func TestV1Init(t *testing.T) { + t.Parallel() + + const rtName = "hello-rt" + + var ( + emptyRados = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{}, + }) + + alreadyExists = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: {}, + }, + }) + + refsToInit = map[string]struct{}{"ref1": {}} + ) + + err := Init(emptyRados, rtName, refsToInit) + assert.NoError(t, err) + + err = Init(alreadyExists, rtName, refsToInit) + assert.Error(t, err) +} + +func TestV1Add(t *testing.T) { + t.Parallel() + + const rtName = "hello-rt" + + var ( + shouldSucceed = []struct { + before *radoswrapper.FakeObj + refsToAdd map[string]struct{} + after *radoswrapper.FakeObj + }{ + // Add a new ref. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToAdd: map[string]struct{}{ + "ref2": {}, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 1, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(2).toBytes(), + }, + }, + // Try to add a ref that's already tracked. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToAdd: map[string]struct{}{ + "ref1": {}, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + }, + // Try to add a ref that's masked. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Mask), + }, + Data: refCount(1).toBytes(), + }, + refsToAdd: map[string]struct{}{ + "ref1": {}, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Mask), + }, + Data: refCount(1).toBytes(), + }, + }, + } + + shouldFail = []*radoswrapper.FakeIOContext{ + // Missing object. + radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()), + // Bad generation number. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Ver: 123, + Oid: rtName, + Data: []byte{0, 0, 0, 0}, + }, + }, + }), + // Refcount overflow. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Data: []byte{0xFF, 0xFF, 0xFF, 0xFF}, + }, + }, + }), + } + ) + + for i := range shouldSucceed { + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + ioctx.Rados.Objs[rtName] = shouldSucceed[i].before + + err := Add(ioctx, rtName, 0, shouldSucceed[i].refsToAdd) + assert.NoError(t, err) + assert.Equal(t, shouldSucceed[i].after, ioctx.Rados.Objs[rtName]) + } + + for i := range shouldFail { + err := Add(shouldFail[i], rtName, 0, map[string]struct{}{"ref1": {}}) + assert.Error(t, err) + } + + // Check for correct error type for wrong gen num. 
+ err := Add(shouldFail[1], rtName, 0, map[string]struct{}{"ref1": {}}) + assert.Error(t, err) + assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate)) +} + +func TestV1Remove(t *testing.T) { + t.Parallel() + + const rtName = "hello-rt" + + var ( + shouldSucceed = []struct { + before *radoswrapper.FakeObj + refsToRemove map[string]reftype.RefType + after *radoswrapper.FakeObj + deleted bool + }{ + // Remove without deleting the reftracker object. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(2).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref1": reftype.Normal, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 1, + Omap: map[string][]byte{ + "ref2": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + deleted: false, + }, + // Remove and delete the reftracker object. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref1": reftype.Normal, + }, + after: nil, + deleted: true, + }, + // Remove and delete the reftracker object. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref1": reftype.Normal, + }, + after: nil, + deleted: true, + }, + // Mask a ref without deleting reftracker object. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(2).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref2": reftype.Mask, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 1, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Mask), + }, + Data: refCount(1).toBytes(), + }, + deleted: false, + }, + // Mask a ref and delete reftracker object. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref1": reftype.Mask, + }, + after: nil, + deleted: true, + }, + // Add a masking ref. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref2": reftype.Mask, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 1, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + "ref2": reftype.ToBytes(reftype.Mask), + }, + Data: refCount(1).toBytes(), + }, + deleted: false, + }, + // Try to remove non-existent ref. + { + before: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + refsToRemove: map[string]reftype.RefType{ + "ref2": reftype.Normal, + }, + after: &radoswrapper.FakeObj{ + Oid: rtName, + Ver: 0, + Omap: map[string][]byte{ + "ref1": reftype.ToBytes(reftype.Normal), + }, + Data: refCount(1).toBytes(), + }, + deleted: false, + }, + } + + // Bad generation number. 
+ badGen = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Ver: 123, + }, + }, + }) + ) + + for i := range shouldSucceed { + ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()) + ioctx.Rados.Objs[rtName] = shouldSucceed[i].before + + deleted, err := Remove(ioctx, rtName, 0, shouldSucceed[i].refsToRemove) + assert.NoError(t, err) + assert.Equal(t, shouldSucceed[i].deleted, deleted) + assert.Equal(t, shouldSucceed[i].after, ioctx.Rados.Objs[rtName]) + } + + _, err := Remove(badGen, rtName, 0, map[string]reftype.RefType{"ref": reftype.Normal}) + assert.Error(t, err) + assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate)) +} diff --git a/internal/util/reftracker/version/version.go b/internal/util/reftracker/version/version.go new file mode 100644 index 000000000..600e9f8a3 --- /dev/null +++ b/internal/util/reftracker/version/version.go @@ -0,0 +1,64 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "encoding/binary" + + "github.com/ceph/ceph-csi/internal/util/reftracker/errors" + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" +) + +// reftracker objects are versioned, should the object layout need to change. +// Version is stored in its underlying RADOS object xattr as uint32. + +const ( + // Name of the xattr entry in the RADOS object. + XattrName = "csi.ceph.com/rt-version" + + // SizeBytes is the size of version in bytes. + SizeBytes = 4 +) + +func ToBytes(v uint32) []byte { + bs := make([]byte, SizeBytes) + binary.BigEndian.PutUint32(bs, v) + + return bs +} + +func FromBytes(bs []byte) (uint32, error) { + if len(bs) != SizeBytes { + return 0, errors.UnexpectedReadSize(SizeBytes, len(bs)) + } + + return binary.BigEndian.Uint32(bs), nil +} + +func Read(ioctx radoswrapper.IOContextW, rtName string) (uint32, error) { + verBytes := make([]byte, SizeBytes) + readSize, err := ioctx.GetXattr(rtName, XattrName, verBytes) + if err != nil { + return 0, err + } + + if readSize != SizeBytes { + return 0, errors.UnexpectedReadSize(SizeBytes, readSize) + } + + return FromBytes(verBytes) +} diff --git a/internal/util/reftracker/version/version_test.go b/internal/util/reftracker/version/version_test.go new file mode 100644 index 000000000..d48e10182 --- /dev/null +++ b/internal/util/reftracker/version/version_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version + +import ( + "testing" + + "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" + + "github.com/stretchr/testify/assert" +) + +var ( + v1Bytes = []byte{0, 0, 0, 1} + v1Value = uint32(1) + + wrongSizeVersionBytes = []byte{0, 0, 1} +) + +func TestVersionBytes(t *testing.T) { + t.Parallel() + + t.Run("ToBytes", func(ts *testing.T) { + ts.Parallel() + + bs := ToBytes(v1Value) + assert.Equal(ts, v1Bytes, bs) + }) + + t.Run("FromBytes", func(ts *testing.T) { + ts.Parallel() + + ver, err := FromBytes(v1Bytes) + assert.NoError(ts, err) + assert.Equal(ts, v1Value, ver) + + _, err = FromBytes(wrongSizeVersionBytes) + assert.Error(ts, err) + }) +} + +func TestVersionRead(t *testing.T) { + t.Parallel() + + const rtName = "hello-rt" + + var ( + validObj = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Xattrs: map[string][]byte{ + XattrName: v1Bytes, + }, + }, + }, + }) + + invalidObjs = []*radoswrapper.FakeIOContext{ + // Missing object. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{}, + }), + // Missing xattr. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Xattrs: map[string][]byte{ + "some-other-xattr": v1Bytes, + }, + }, + }, + }), + // Wrongly sized version value. + radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{ + Objs: map[string]*radoswrapper.FakeObj{ + rtName: { + Oid: rtName, + Xattrs: map[string][]byte{ + XattrName: wrongSizeVersionBytes, + }, + }, + }, + }), + } + ) + + ver, err := Read(validObj, rtName) + assert.NoError(t, err) + assert.Equal(t, v1Value, ver) + + for i := range invalidObjs { + _, err = Read(invalidObjs[i], rtName) + assert.Error(t, err) + } +} diff --git a/internal/util/topology.go b/internal/util/topology.go index 51b61fbe4..73ae9728b 100644 --- a/internal/util/topology.go +++ b/internal/util/topology.go @@ -168,14 +168,14 @@ func GetTopologyFromRequest( return &topologyPools, accessibilityRequirements, nil } -// MatchTopologyForPool returns the topology map, if the passed in pool matches any +// MatchPoolAndTopology returns the topology map, if the passed in pool matches any // passed in accessibility constraints. 
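[Editor's aside: the widened MatchPoolAndTopology signature below now surfaces the matched pool name, its data pool, and the topology segments in one call. A minimal, hypothetical caller sketch follows, written as if inside the ceph-csi module; the function name, variable names, and the "replicapool" pool are illustrative only and not part of this change.]

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// matchDemo assumes topoPools was parsed from the StorageClass topology
// configuration and accReq came from the CSI CreateVolume request.
func matchDemo(topoPools []util.TopologyConstrainedPool, accReq *csi.TopologyRequirement) error {
	// One call now yields the matched pool, its data pool (relevant for
	// erasure-coded layouts), and the topology segments to publish.
	poolName, dataPoolName, topology, err := util.MatchPoolAndTopology(&topoPools, accReq, "replicapool")
	if err != nil {
		return err
	}

	fmt.Println(poolName, dataPoolName, topology)

	return nil
}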
-func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool, - accessibilityRequirements *csi.TopologyRequirement, poolName string) (map[string]string, error) { +func MatchPoolAndTopology(topologyPools *[]TopologyConstrainedPool, + accessibilityRequirements *csi.TopologyRequirement, poolName string) (string, string, map[string]string, error) { var topologyPool []TopologyConstrainedPool if topologyPools == nil || accessibilityRequirements == nil { - return nil, nil + return "", "", nil, nil } // find the pool in the list of topology based pools @@ -187,13 +187,11 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool, } } if len(topologyPool) == 0 { - return nil, fmt.Errorf("none of the configured topology pools (%+v) matched passed in pool name (%s)", + return "", "", nil, fmt.Errorf("none of the configured topology pools (%+v) matched passed in pool name (%s)", topologyPools, poolName) } - _, _, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements) - - return topology, err + return FindPoolAndTopology(&topologyPool, accessibilityRequirements) } // FindPoolAndTopology loops through passed in "topologyPools" and also related diff --git a/internal/util/topology_test.go b/internal/util/topology_test.go index 71c5416b6..32ead00f9 100644 --- a/internal/util/topology_test.go +++ b/internal/util/topology_test.go @@ -37,7 +37,7 @@ func checkAndReportError(t *testing.T, msg string, err error) { } } -// TestFindPoolAndTopology also tests MatchTopologyForPool. +// TestFindPoolAndTopology also tests MatchPoolAndTopology. func TestFindPoolAndTopology(t *testing.T) { t.Parallel() var err error @@ -319,15 +319,15 @@ func TestFindPoolAndTopology(t *testing.T) { t.Errorf("expected data pool to be named ec-%s, got %s", poolName, dataPoolName) } - // TEST: MatchTopologyForPool + // TEST: MatchPoolAndTopology // check for non-existent pool - _, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1+"fuzz") + _, _, _, err = MatchPoolAndTopology(&validMultipleTopoPools, &validAccReq, pool1+"fuzz") if err == nil { t.Errorf("expected failure due to non-existent pool name (%s) got success", pool1+"fuzz") } // check for existing pool - topoSegment, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1) + _, _, topoSegment, err = MatchPoolAndTopology(&validMultipleTopoPools, &validAccReq, pool1) err = checkOutput(err, pool1, topoSegment) checkAndReportError(t, "expected success got:", err) } diff --git a/internal/util/util.go b/internal/util/util.go index 6bf522713..c67fd0570 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -308,6 +308,18 @@ func IsMountPoint(p string) (bool, error) { return !notMnt, nil } +// IsCorruptedMountError checks if the given error is a result of a corrupted +// mountpoint. +func IsCorruptedMountError(err error) bool { + return mount.IsCorruptedMnt(err) +} + +// ReadMountInfoForProc reads /proc/<proc>/mountinfo and parses it into +// MountInfo structs. +func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) { + return mount.ParseMountInfo(fmt.Sprintf("/proc/%s/mountinfo", proc)) +} + // Mount mounts the source to target path.
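[Editor's aside: a similarly hypothetical sketch of how the two mount helpers added above might be combined in a node-plugin health check; checkStagedMount, stagingPath, and the recovery comment are illustrative, not part of this change.]

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/util"
)

// checkStagedMount assumes stagingPath is a path the node plugin staged
// earlier; the actual recovery step is left as a comment.
func checkStagedMount(stagingPath string) error {
	if _, err := util.IsMountPoint(stagingPath); err != nil {
		if util.IsCorruptedMountError(err) {
			// A corrupted mountpoint (e.g. left behind by a crashed fuse
			// daemon) could be unmounted and re-staged here instead of
			// failing the request outright.
			fmt.Printf("%s is corrupted, needs remount\n", stagingPath)
		} else {
			return err
		}
	}

	// Passing "self" makes the helper read /proc/self/mountinfo, i.e. the
	// mount table of the calling process.
	entries, err := util.ReadMountInfoForProc("self")
	if err != nil {
		return err
	}
	fmt.Printf("found %d mountinfo entries\n", len(entries))

	return nil
}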
 func Mount(source, target, fstype string, options []string) error {
 	dummyMount := mount.New("")
diff --git a/scripts/Dockerfile.devel b/scripts/Dockerfile.devel
index e7d502426..33afbb8d4 100644
--- a/scripts/Dockerfile.devel
+++ b/scripts/Dockerfile.devel
@@ -29,6 +29,8 @@ RUN dnf -y install \
     librados-devel \
     librbd-devel \
     && dnf -y update \
+    && dnf clean all \
+    && rm -rf /var/cache/yum \
     && true
 
 WORKDIR "/go/src/github.com/ceph/ceph-csi"
diff --git a/scripts/golangci.yml.in b/scripts/golangci.yml.in
index 26d60bf80..7af2c05b2 100644
--- a/scripts/golangci.yml.in
+++ b/scripts/golangci.yml.in
@@ -6,7 +6,7 @@
 # options for analysis running
 run:
   build-tags:
-    - @@CEPH_VERSION@@
+@@BUILD_TAGS@@
 
   # default concurrency is a available CPU number
   concurrency: 4
@@ -169,7 +169,11 @@ linters:
     - funlen
     - testpackage
     - exhaustivestruct
-    # TODO: enable goerr113, see: https://github.com/ceph/ceph-csi/issues/1227
+    # This linter requires adding extra, unnecessary code. Hence, we
+    # prefer to keep it disabled, but it can be enabled once a better
+    # resolution is available. For more details check the
+    # issue below.
+    # see: https://github.com/ceph/ceph-csi/issues/1227
     - goerr113
     - forbidigo
     # TODO: enable gomoddirectives
diff --git a/scripts/minikube.sh b/scripts/minikube.sh
index 9e2d5ca40..549dbc35c 100755
--- a/scripts/minikube.sh
+++ b/scripts/minikube.sh
@@ -124,6 +124,21 @@ function validate_container_cmd() {
     fi
 }
 
+# validate csi sidecar image version
+function validate_sidecar() {
+    SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+    # shellcheck disable=SC1091
+    source "${SCRIPT_DIR}/../build.env"
+
+    sidecars=(CSI_ATTACHER_VERSION CSI_SNAPSHOTTER_VERSION CSI_PROVISIONER_VERSION CSI_RESIZER_VERSION CSI_NODE_DRIVER_REGISTRAR_VERSION)
+    for sidecar in "${sidecars[@]}"; do
+        if [[ -z "${!sidecar}" ]]; then
+            echo "${sidecar} version is empty, make sure build.env has set this sidecar version"
+            exit 1
+        fi
+    done
+}
+
 # Storage providers and the default storage class is not needed for Ceph-CSI
 # testing. In order to reduce resources and potential conflicts between storage
 # plugins, disable them.
@@ -161,13 +176,6 @@ else
     DISK_CONFIG=""
 fi
 
-#configure csi sidecar version
-CSI_ATTACHER_VERSION=${CSI_ATTACHER_VERSION:-"v3.2.1"}
-CSI_SNAPSHOTTER_VERSION=${CSI_SNAPSHOTTER_VERSION:-"v4.1.1"}
-CSI_PROVISIONER_VERSION=${CSI_PROVISIONER_VERSION:-"v2.2.2"}
-CSI_RESIZER_VERSION=${CSI_RESIZER_VERSION:-"v1.2.0"}
-CSI_NODE_DRIVER_REGISTRAR_VERSION=${CSI_NODE_DRIVER_REGISTRAR_VERSION:-"v2.2.0"}
-
 # configure csi image version
 CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"}
 
@@ -290,6 +298,8 @@ cephcsi)
     copy_image_to_cluster "${CEPHCSI_IMAGE_REPO}"/cephcsi:"${CSI_IMAGE_VERSION}" "${CEPHCSI_IMAGE_REPO}"/cephcsi:"${CSI_IMAGE_VERSION}"
     ;;
 k8s-sidecar)
+    echo "validating sidecar's image version"
+    validate_sidecar
     echo "copying the kubernetes sidecar images"
     copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}" "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}"
     copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}" "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}"
diff --git a/tools/yamlgen/main.go b/tools/yamlgen/main.go
index eb255c2f6..2d6411e35 100644
--- a/tools/yamlgen/main.go
+++ b/tools/yamlgen/main.go
@@ -19,8 +19,10 @@ package main
 import (
 	"fmt"
 	"os"
+	"path"
 	"reflect"
 
+	"github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs"
 	"github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
 	"github.com/ceph/ceph-csi/api/deploy/ocp"
 )
@@ -46,6 +48,11 @@ var yamlArtifacts = []deploymentArtifact{
 		reflect.ValueOf(ocp.NewSecurityContextConstraintsYAML),
 		reflect.ValueOf(ocp.SecurityContextConstraintsDefaults),
 	},
+	{
+		"../deploy/nfs/kubernetes/csidriver.yaml",
+		reflect.ValueOf(nfs.NewCSIDriverYAML),
+		reflect.ValueOf(nfs.CSIDriverDefaults),
+	},
 	{
 		"../deploy/rbd/kubernetes/csidriver.yaml",
 		reflect.ValueOf(rbd.NewCSIDriverYAML),
@@ -67,6 +74,15 @@ func main() {
 func writeArtifact(artifact deploymentArtifact) {
 	fmt.Printf("creating %q...", artifact.filename)
 
+	dir := path.Dir(artifact.filename)
+	_, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		err = os.MkdirAll(dir, 0o775)
+		if err != nil {
+			panic(fmt.Sprintf("failed to create directory %q: %v", dir, err))
+		}
+	}
+
 	f, err := os.Create(artifact.filename)
 	if err != nil {
 		panic(fmt.Sprintf("failed to create file %q: %v", artifact.filename, err))
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
new file mode 100644
index 000000000..5f14d1162
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
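The vendored AWS SDK v2 core sources follow. As orientation, a hedged sketch of wiring together the types they define (`Config`, `CredentialsProviderFunc`, `Credentials`); the region and key values are placeholders, not anything this patch configures:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	cfg := aws.Config{
		// Placeholder region; any valid AWS region string works here.
		Region: "us-east-1",
		// A static provider built from the function adapter defined in
		// the vendored credentials.go below.
		Credentials: aws.CredentialsProviderFunc(
			func(ctx context.Context) (aws.Credentials, error) {
				return aws.Credentials{
					AccessKeyID:     "EXAMPLEKEY",    // placeholder
					SecretAccessKey: "EXAMPLESECRET", // placeholder
					Source:          "StaticExample",
				}, nil
			}),
	}

	// Copy returns a shallow copy, so the original Config stays untouched.
	creds, err := cfg.Copy().Credentials.Retrieve(context.TODO())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("signing with credentials from", creds.Source)
}
```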
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
new file mode 100644
index 000000000..df2abb58c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -0,0 +1,166 @@
+package aws
+
+import (
+	"net/http"
+
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// HTTPClient provides the interface to provide custom HTTPClients. Generally
+// *http.Client is sufficient for most use cases. The HTTPClient should not
+// follow redirects.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// A Config provides service configuration for service clients.
+type Config struct {
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+	// information on AWS regions.
+	Region string
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials CredentialsProvider
+
+	// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+	// The SDK defaults to a BuildableClient allowing API clients to create
+	// copies of the HTTP Client for service specific customizations.
+	//
+	// Use a (*http.Client) for custom behavior. Using a custom http.Client
+	// will prevent the SDK from modifying the HTTP client.
+	HTTPClient HTTPClient
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation for additional usage
+	// information.
+	//
+	// Deprecated: See Config.EndpointResolverWithOptions
+	EndpointResolver EndpointResolver
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// When EndpointResolverWithOptions is specified, it will be used by a
+	// service client rather than using EndpointResolver if also specified.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation for additional
+	// usage information.
+	EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client
+	// will call an operation that fails with a retryable error.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is not nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is not nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMode RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	//
+	// In general, the provider function should return a new instance of a
+	// Retryer if you are attempting to provide a consistent Retryer
+	// configuration across all clients. This will ensure that each client will
+	// be provided a new instance of the Retryer implementation, and will avoid
+	// issues such as sharing the same retry token bucket across services.
+	//
+	// If not nil, RetryMaxAttempts, and RetryMode will be ignored by API
+	// clients.
+	Retryer func() Retryer
+
+	// ConfigSources are the sources that were used to construct the Config.
+	// Allows for additional configuration to be loaded by clients.
+	ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations modify how the API
+	// client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard error.
+	Logger logging.Logger
+
+	// Configures the events that will be sent to the configured logger. This
+	// can be used to configure the logging of signing, retries, request, and
+	// responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode ClientLogMode
+
+	// The configured DefaultsMode. If not specified, service clients will
+	// default to legacy.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile,
+	// standard
+	DefaultsMode DefaultsMode
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode
+	// is set to DefaultsModeAuto and is initialized by
+	// `config.LoadDefaultConfig`. You should not populate this structure
+	// programmatically, or rely on the values here within your applications.
+	RuntimeEnvironment RuntimeEnvironment
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c Config) Copy() Config {
+	cp := c
+	return cp
+}
+
+// EndpointDiscoveryEnableState indicates if endpoint discovery is
+// enabled, disabled, auto or unset state.
+//
+// Default behavior (Auto or Unset) indicates operations that require endpoint
+// discovery will use Endpoint Discovery by default. Operations that
+// optionally use Endpoint Discovery will not use Endpoint Discovery
+// unless EndpointDiscovery is explicitly enabled.
+type EndpointDiscoveryEnableState uint
+
+// Enumeration values for EndpointDiscoveryEnableState
+const (
+	// EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
+	// Users do not need to use this value explicitly. The behavior for unset
+	// is the same as for EndpointDiscoveryAuto.
+	EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
+
+	// EndpointDiscoveryAuto represents an AUTO state that allows endpoint
+	// discovery only when required by the api. This is the default
+	// configuration resolved by the client if endpoint discovery is neither
+	// enabled or disabled.
+	EndpointDiscoveryAuto // default state
+
+	// EndpointDiscoveryDisabled indicates client MUST not perform endpoint
+	// discovery even when required.
+	EndpointDiscoveryDisabled
+
+	// EndpointDiscoveryEnabled indicates client MUST always perform endpoint
+	// discovery if supported for the operation.
+	EndpointDiscoveryEnabled
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 000000000..4d8e26ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,22 @@
+package aws
+
+import (
+	"context"
+	"time"
+)
+
+type suppressedContext struct {
+	context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
new file mode 100644
index 000000000..1411a5c32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -0,0 +1,139 @@
+package aws
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options
+type CredentialsCacheOptions struct {
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired. This can cause an
+	// increased number of requests to refresh the credentials to occur.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials
+	// within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0.
+	//
+	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to
+	// expire between 30 to 60 seconds prior to their actual expiration time.
+	//
+	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+	// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+	// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+	ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+type CredentialsCache struct {
+	// provider is the CredentialProvider implementation to be wrapped by the CredentialCache.
+	provider CredentialsProvider
+
+	options CredentialsCacheOptions
+	creds   atomic.Value
+	sf      singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic
+// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+	options := CredentialsCacheOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ExpiryWindow < 0 {
+		options.ExpiryWindow = 0
+	}
+
+	if options.ExpiryWindowJitterFrac < 0 {
+		options.ExpiryWindowJitterFrac = 0
+	} else if options.ExpiryWindowJitterFrac > 1 {
+		options.ExpiryWindowJitterFrac = 1
+	}
+
+	return &CredentialsCache{
+		provider: provider,
+		options:  options,
+	}
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved, and not expired the cached credentials will be returned. If the
+// credentials have not been retrieved yet, or expired the provider's Retrieve
+// method will be called.
+//
+// Returns and error if the provider's retrieve method returns an error.
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+	if creds := p.getCreds(); creds != nil {
+		return *creds, nil
+	}
+
+	resCh := p.sf.DoChan("", func() (interface{}, error) {
+		return p.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Credentials), res.Err
+	case <-ctx.Done():
+		return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+	}
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	if creds := p.getCreds(); creds != nil {
+		return *creds, nil
+	}
+
+	creds, err := p.provider.Retrieve(ctx)
+	if err == nil {
+		if creds.CanExpire {
+			randFloat64, err := sdkrand.CryptoRandFloat64()
+			if err != nil {
+				return Credentials{}, err
+			}
+			jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+			creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter))
+		}
+
+		p.creds.Store(&creds)
+	}
+
+	return creds, err
+}
+
+func (p *CredentialsCache) getCreds() *Credentials {
+	v := p.creds.Load()
+	if v == nil {
+		return nil
+	}
+
+	c := v.(*Credentials)
+	if c != nil && c.HasKeys() && !c.Expired() {
+		return c
+	}
+
+	return nil
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+	p.creds.Store((*Credentials)(nil))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 000000000..ce3868a9f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// SDK's external config loading attempt to resolve credentials.
+//
+//     cfg, err := config.LoadDefaultConfig(context.TODO(),
+//         config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+//     )
+//     if err != nil {
+//         log.Fatalf("failed to load config, %v", err)
+//     }
+//
+//     client := s3.NewFromConfig(cfg)
+//
+// Alternatively you can leave the API client Option's `Credential` member to
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+//     client := s3.New(s3.Options{
+//         // Credentials defaults to a nil value.
+//     })
+//
+// This can also be configured for specific operations calls too.
+//
+//     cfg, err := config.LoadDefaultConfig(context.TODO())
+//     if err != nil {
+//         log.Fatalf("failed to load config, %v", err)
+//     }
+//
+//     client := s3.NewFromConfig(config)
+//
+//     result, err := client.GetObject(context.TODO(), s3.GetObject{
+//         Bucket: aws.String("example-bucket"),
+//         Key:    aws.String("example-key"),
+//     }, func(o *s3.Options) {
+//         o.Credentials = nil
+//         // Or
+//         o.Credentials = aws.AnonymousCredentials{}
+//     })
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return error, and cannot be used to sign a request. The AnonymousCredentials
+// type is used as a sentinel type instructing the AWS request signing
+// middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+	return Credentials{Source: "AnonymousCredentials"},
+		fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// A Credentials is the AWS credentials value for individual credential fields.
+type Credentials struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Source of the credentials
+	Source string
+
+	// Time the credentials will expire.
+	CanExpire bool
+	Expires   time.Time
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+	if v.CanExpire {
+		// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+		// time is always based on reported wall-clock time.
+		return !v.Expires.After(sdk.NowTime().Round(0))
+	}
+
+	return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide
+// credentials Credentials. A CredentialsProvider is required to manage its own
+// Expired state, and what to be expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// Error is returned if the value were not obtainable, or empty.
+	Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
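A hedged sketch combining the cache and provider types above: wrapping a `CredentialsProviderFunc` in a `CredentialsCache` with a 10s expiry window and 0.5 jitter means a refresh triggers 5-10 seconds before the credentials actually expire. (The key values are placeholders; note also that `singleRetrieve` above wraps the caller's context in `suppressedContext`, apparently so one canceled caller cannot cancel the shared singleflight fetch.)

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// An illustrative provider returning short-lived credentials.
	provider := aws.CredentialsProviderFunc(
		func(ctx context.Context) (aws.Credentials, error) {
			return aws.Credentials{
				AccessKeyID:     "EXAMPLEKEY",    // placeholder
				SecretAccessKey: "EXAMPLESECRET", // placeholder
				CanExpire:       true,
				Expires:         time.Now().Add(15 * time.Minute),
			}, nil
		})

	// Refresh 5-10s early: window 10s, jitter fraction 0.5.
	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 10 * time.Second
		o.ExpiryWindowJitterFrac = 0.5
	})

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("cached credentials valid until", creds.Expires)
}
```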
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+	return fn(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
new file mode 100644
index 000000000..fd408e518
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
@@ -0,0 +1,38 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"runtime"
+	"strings"
+)
+
+var getGOOS = func() string {
+	return runtime.GOOS
+}
+
+// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
+// is set to aws.DefaultsModeAuto.
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
+	goos := getGOOS()
+	if goos == "android" || goos == "ios" {
+		return aws.DefaultsModeMobile
+	}
+
+	var currentRegion string
+	if len(environment.EnvironmentIdentifier) > 0 {
+		currentRegion = environment.Region
+	}
+
+	if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
+		currentRegion = environment.EC2InstanceMetadataRegion
+	}
+
+	if len(region) > 0 && len(currentRegion) > 0 {
+		if strings.EqualFold(region, currentRegion) {
+			return aws.DefaultsModeInRegion
+		}
+		return aws.DefaultsModeCrossRegion
+	}
+
+	return aws.DefaultsModeStandard
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
new file mode 100644
index 000000000..8b7e01fa2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
@@ -0,0 +1,43 @@
+package defaults
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// Configuration is the set of SDK configuration options that are determined based
+// on the configured DefaultsMode.
+type Configuration struct {
+	// RetryMode is the configuration's default retry mode API clients should
+	// use for constructing a Retryer.
+	RetryMode aws.RetryMode
+
+	// ConnectTimeout is the maximum amount of time a dial will wait for
+	// a connect to complete.
+	//
+	// See https://pkg.go.dev/net#Dialer.Timeout
+	ConnectTimeout *time.Duration
+
+	// TLSNegotiationTimeout specifies the maximum amount of time waiting to
+	// wait for a TLS handshake.
+	//
+	// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
+	TLSNegotiationTimeout *time.Duration
+}
+
+// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
+	if c.ConnectTimeout == nil {
+		return 0, false
+	}
+	return *c.ConnectTimeout, true
+}
+
+// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set.
+func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
+	if c.TLSNegotiationTimeout == nil {
+		return 0, false
+	}
+	return *c.TLSNegotiationTimeout, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
new file mode 100644
index 000000000..dbaa873dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
@@ -0,0 +1,50 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
+
+package defaults
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"time"
+)
+
+// GetModeConfiguration returns the default Configuration descriptor for the given mode.
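Before the generated mode table below, a hedged sketch of how these pieces compose: parse a mode string with `SetFromString` (defined further below in defaultsmode.go) and look up its defaults with `GetModeConfiguration`; the printed values follow from the generated table ("in-region" resolves to 1100ms timeouts and the "standard" retry mode):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
)

func main() {
	var mode aws.DefaultsMode
	// SetFromString matches the known constants case-insensitively.
	if ok := mode.SetFromString("in-region"); !ok {
		fmt.Println("unknown mode kept as-is:", mode)
	}

	cfg, err := defaults.GetModeConfiguration(mode)
	if err != nil {
		fmt.Println(err)
		return
	}

	if timeout, ok := cfg.GetConnectTimeout(); ok {
		fmt.Println("connect timeout:", timeout) // 1.1s for in-region
	}
	fmt.Println("retry mode:", cfg.RetryMode) // "standard"
}
```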
+//
+// Supports the following modes: cross-region, in-region, mobile, standard
+func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
+	var mv aws.DefaultsMode
+	mv.SetFromString(string(mode))
+
+	switch mv {
+	case aws.DefaultsModeCrossRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeInRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(1100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeMobile:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(30000 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeStandard:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	default:
+		return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
new file mode 100644
index 000000000..2d90011b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
@@ -0,0 +1,2 @@
+// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
+package defaults
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
new file mode 100644
index 000000000..fcf9387c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
@@ -0,0 +1,95 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
+
+package aws
+
+import (
+	"strings"
+)
+
+// DefaultsMode is the SDK defaults mode setting.
+type DefaultsMode string
+
+// The DefaultsMode constants.
+const (
+	// DefaultsModeAuto is an experimental mode that builds on the standard mode.
+	// The SDK will attempt to discover the execution environment to determine the
+	// appropriate settings automatically.
+	//
+	// Note that the auto detection is heuristics-based and does not guarantee 100%
+	// accuracy. STANDARD mode will be used if the execution environment cannot
+	// be determined. The auto detection might query EC2 Instance Metadata service
+	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
+	// which might introduce latency. Therefore we recommend choosing an explicit
+	// defaults_mode instead if startup latency is critical to your application
+	DefaultsModeAuto DefaultsMode = "auto"
+
+	// DefaultsModeCrossRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services in a different region
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeCrossRegion DefaultsMode = "cross-region"
+
+	// DefaultsModeInRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services from within the same AWS
+	// region
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeInRegion DefaultsMode = "in-region"
+
+	// DefaultsModeLegacy provides default settings that vary per SDK and were used
+	// prior to establishment of defaults_mode
+	DefaultsModeLegacy DefaultsMode = "legacy"
+
+	// DefaultsModeMobile builds on the standard mode and includes optimization
+	// tailored for mobile applications
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeMobile DefaultsMode = "mobile"
+
+	// DefaultsModeStandard provides the latest recommended default values that
+	// should be safe to run in most scenarios
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeStandard DefaultsMode = "standard"
+)
+
+// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+// the provided string when compared using EqualFold. If the value does not match a known
+// constant it will be set to as-is and the function will return false. As a special case, if the
+// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode.
+func (d *DefaultsMode) SetFromString(v string) (ok bool) {
+	switch {
+	case strings.EqualFold(v, string(DefaultsModeAuto)):
+		*d = DefaultsModeAuto
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
+		*d = DefaultsModeCrossRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeInRegion)):
+		*d = DefaultsModeInRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeLegacy)):
+		*d = DefaultsModeLegacy
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeMobile)):
+		*d = DefaultsModeMobile
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeStandard)):
+		*d = DefaultsModeStandard
+		ok = true
+	case len(v) == 0:
+		*d = DefaultsModeLegacy
+		ok = true
+	default:
+		*d = DefaultsMode(v)
+	}
+	return ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 000000000..befc3bee1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value use the "String" function. This makes it easy to
+// to get pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//     var strPtr *string
+//
+//     // Without the SDK's conversion functions
+//     str := "my string"
+//     strPtr = &str
+//
+//     // With the SDK's conversion functions
+//     strPtr = aws.String("my string")
+//
+//     // Convert *string to string value
+//     str = aws.ToString(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly types used in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+//     var strPtrs []*string
+//     var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//     // Convert []string to []*string
+//     strPtrs = aws.StringSlice(strs)
+//
+//     // Convert []*string to []string
+//     strs = aws.ToStringSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 000000000..aa10a9b40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,229 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState.
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+	type iface interface {
+		GetUseDualStackEndpoint() DualStackEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseDualStackEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
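The variadic option getters such as `GetUseDualStackEndpoint` above work through an anonymous interface assertion: any options value exposing the expected getter method is picked up, regardless of its concrete type. A hedged sketch with an illustrative options struct (the `fakeOptions` type is not part of the SDK):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// fakeOptions is an illustrative options struct; any type exposing this
// getter satisfies the anonymous interface inside GetUseDualStackEndpoint.
type fakeOptions struct {
	dualStack aws.DualStackEndpointState
}

func (o fakeOptions) GetUseDualStackEndpoint() aws.DualStackEndpointState {
	return o.dualStack
}

func main() {
	state, found := aws.GetUseDualStackEndpoint(
		fakeOptions{dualStack: aws.DualStackEndpointStateEnabled},
	)
	fmt.Println(found, state == aws.DualStackEndpointStateEnabled) // true true
}
```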
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+	type iface interface {
+		GetUseFIPSEndpoint() FIPSEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseFIPSEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolvers. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+type Endpoint struct {
+	// The base URL endpoint the SDK API clients will use to make API calls to.
+	// The SDK will suffix URI path and query elements to this endpoint.
+	URL string
+
+	// Specifies if the endpoint's hostname can be modified by the SDK's API
+	// client.
+	//
+	// If the hostname is mutable the SDK API clients may modify any part of
+	// the hostname based on the requirements of the API, (e.g. adding, or
+	// removing content in the hostname). Such as, Amazon S3 API client
+	// prefixing "bucketname" to the hostname, or changing the
+	// hostname service name component from "s3." to "s3-accesspoint.dualstack."
+	// for the dualstack endpoint of an S3 Accesspoint resource.
+	//
+	// Care should be taken when providing a custom endpoint for an API. If the
+	// endpoint hostname is mutable, and the client cannot modify the endpoint
+	// correctly, the operation call will most likely fail, or have undefined
+	// behavior.
+	//
+	// If hostname is immutable, the SDK API clients will not modify the
+	// hostname of the URL. This may cause the API client not to function
+	// correctly if the API requires the operation specific hostname values
+	// to be used by the client.
+	//
+	// This flag does not modify the API client's behavior if this endpoint
+	// will be used instead of Endpoint Discovery, or if the endpoint will be
+	// used to perform Endpoint Discovery. That behavior is configured via the
+	// API Client's Options.
+	HostnameImmutable bool
+
+	// The AWS partition the endpoint belongs to.
+	PartitionID string
+
+	// The service name that should be used for signing the requests to the
+	// endpoint.
+	SigningName string
+
+	// The region that should be used for signing the request to the endpoint.
+	SigningRegion string
+
+	// The signing method that should be used for signing the requests to the
+	// endpoint.
+	SigningMethod string
+
+	// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+	// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+	// If source is not provided when providing a custom endpoint, the SDK may not
+	// perform required host mutations correctly. Source should be used along with
+	// HostnameImmutable property as per the usage requirement.
+	Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+type EndpointSource int
+
+const (
+	// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+	EndpointSourceServiceMetadata EndpointSource = iota
+
+	// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
+	// user provides a custom endpoint to be used by the SDK.
+	EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fallback and attempt to use it's internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+	Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+	return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: See EndpointResolverWithOptions
+type EndpointResolver interface {
+	ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: See EndpointResolverWithOptionsFunc
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+//
+// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+	return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+type EndpointResolverWithOptions interface {
+	ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+	return e(service, region, options...)
+}
+
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options does not have a method to retrieve the DisableHTTPS.
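A hedged sketch of a custom resolver built on the types above: it pins one service to a private endpoint and returns `EndpointNotFoundError` for everything else so the client falls back to its internal default resolver. The service name, region, and URL are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			// Illustrative match; service names and the URL are placeholders.
			if service == "KMS" && region == "us-east-1" {
				return aws.Endpoint{
					URL:               "https://kms.internal.example.com",
					SigningRegion:     region,
					HostnameImmutable: true,
					Source:            aws.EndpointSourceCustom,
				}, nil
			}
			// Fall back to the API client's internal default resolver.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	ep, err := resolver.ResolveEndpoint("KMS", "us-east-1")
	fmt.Println(ep.URL, err)
}
```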
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) { + type iface interface { + GetDisableHTTPS() bool + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetDisableHTTPS() + found = true + break + } + } + return value, found +} + +// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value. +// Returns boolean false if the provided options does not have a method to retrieve the ResolvedRegion. +func GetResolvedRegion(options ...interface{}) (value string, found bool) { + type iface interface { + GetResolvedRegion() string + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetResolvedRegion() + found = true + break + } + } + return value, found +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go new file mode 100644 index 000000000..f390a08f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go @@ -0,0 +1,9 @@ +package aws + +// MissingRegionError is an error that is returned if region configuration +// value was not found. +type MissingRegionError struct{} + +func (*MissingRegionError) Error() string { + return "an AWS region is required, but was not found" +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go new file mode 100644 index 000000000..2394418e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go @@ -0,0 +1,365 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + return ptr.ToBool(p) +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + return ptr.ToBoolSlice(vs) +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + return ptr.ToBoolMap(vs) +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + return ptr.ToByte(p) +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + return ptr.ToByteSlice(vs) +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + return ptr.ToByteMap(vs) +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + return ptr.ToString(p) +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. 
+func ToStringSlice(vs []*string) []string { + return ptr.ToStringSlice(vs) +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + return ptr.ToStringMap(vs) +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + return ptr.ToInt(p) +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + return ptr.ToIntSlice(vs) +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + return ptr.ToIntMap(vs) +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + return ptr.ToInt8(p) +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + return ptr.ToInt8Slice(vs) +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + return ptr.ToInt8Map(vs) +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + return ptr.ToInt16(p) +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + return ptr.ToInt16Slice(vs) +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + return ptr.ToInt16Map(vs) +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + return ptr.ToInt32(p) +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + return ptr.ToInt32Slice(vs) +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + return ptr.ToInt32Map(vs) +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + return ptr.ToInt64(p) +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. 
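// A short sketch of the pointer-dereferencing helpers above, as they might be
// used when reading optional fields from SDK response types. The values are
// illustrative; the point is that nil pointers safely yield zero values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	name := "example"
	var missing *string // nil pointer, e.g. an unset response field

	fmt.Println(aws.ToString(&name))   // "example"
	fmt.Println(aws.ToString(missing)) // "" (zero value, no nil-check needed)

	tags := map[string]*string{"env": &name}
	fmt.Println(aws.ToStringMap(tags)) // map[env:example]
}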
+func ToInt64Slice(vs []*int64) []int64 { + return ptr.ToInt64Slice(vs) +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + return ptr.ToInt64Map(vs) +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + return ptr.ToUint(p) +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + return ptr.ToUintSlice(vs) +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + return ptr.ToUintMap(vs) +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + return ptr.ToUint8(p) +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + return ptr.ToUint8Slice(vs) +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + return ptr.ToUint8Map(vs) +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + return ptr.ToUint16(p) +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + return ptr.ToUint16Slice(vs) +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + return ptr.ToUint16Map(vs) +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + return ptr.ToUint32(p) +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + return ptr.ToUint32Slice(vs) +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + return ptr.ToUint32Map(vs) +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + return ptr.ToUint64(p) +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. 
Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + return ptr.ToUint64Slice(vs) +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + return ptr.ToUint64Map(vs) +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + return ptr.ToFloat32(p) +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + return ptr.ToFloat32Slice(vs) +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + return ptr.ToFloat32Map(vs) +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + return ptr.ToFloat64(p) +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + return ptr.ToFloat64Slice(vs) +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + return ptr.ToFloat64Map(vs) +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + return ptr.ToTime(p) +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + return ptr.ToTimeSlice(vs) +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + return ptr.ToTimeMap(vs) +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + return ptr.ToDuration(p) +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + return ptr.ToDurationSlice(vs) +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. 
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + return ptr.ToDurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go new file mode 100644 index 000000000..303e48c5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package aws + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.15.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go new file mode 100644 index 000000000..9e34d26f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -0,0 +1,117 @@ +// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. +const ( + LogSigning ClientLogMode = 1 << (64 - 1 - iota) + LogRetries + LogRequest + LogRequestWithBody + LogResponse + LogResponseWithBody + LogDeprecatedUsage + LogRequestEventMessage + LogResponseEventMessage +) + +// IsSigning returns whether the Signing logging mode bit is set +func (m ClientLogMode) IsSigning() bool { + return m&LogSigning != 0 +} + +// IsRetries returns whether the Retries logging mode bit is set +func (m ClientLogMode) IsRetries() bool { + return m&LogRetries != 0 +} + +// IsRequest returns whether the Request logging mode bit is set +func (m ClientLogMode) IsRequest() bool { + return m&LogRequest != 0 +} + +// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set +func (m ClientLogMode) IsRequestWithBody() bool { + return m&LogRequestWithBody != 0 +} + +// IsResponse returns whether the Response logging mode bit is set +func (m ClientLogMode) IsResponse() bool { + return m&LogResponse != 0 +} + +// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set +func (m ClientLogMode) IsResponseWithBody() bool { + return m&LogResponseWithBody != 0 +} + +// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set +func (m ClientLogMode) IsDeprecatedUsage() bool { + return m&LogDeprecatedUsage != 0 +} + +// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set +func (m ClientLogMode) IsRequestEventMessage() bool { + return m&LogRequestEventMessage != 0 +} + +// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set +func (m ClientLogMode) IsResponseEventMessage() bool { + return m&LogResponseEventMessage != 0 +} + +// ClearSigning clears the Signing logging mode bit +func (m *ClientLogMode) ClearSigning() { + *m &^= LogSigning +} + +// ClearRetries clears the Retries logging mode bit +func (m *ClientLogMode) ClearRetries() { + *m &^= LogRetries +} + +// ClearRequest clears the Request logging mode bit +func 
(m *ClientLogMode) ClearRequest() { + *m &^= LogRequest +} + +// ClearRequestWithBody clears the RequestWithBody logging mode bit +func (m *ClientLogMode) ClearRequestWithBody() { + *m &^= LogRequestWithBody +} + +// ClearResponse clears the Response logging mode bit +func (m *ClientLogMode) ClearResponse() { + *m &^= LogResponse +} + +// ClearResponseWithBody clears the ResponseWithBody logging mode bit +func (m *ClientLogMode) ClearResponseWithBody() { + *m &^= LogResponseWithBody +} + +// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit +func (m *ClientLogMode) ClearDeprecatedUsage() { + *m &^= LogDeprecatedUsage +} + +// ClearRequestEventMessage clears the RequestEventMessage logging mode bit +func (m *ClientLogMode) ClearRequestEventMessage() { + *m &^= LogRequestEventMessage +} + +// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit +func (m *ClientLogMode) ClearResponseEventMessage() { + *m &^= LogResponseEventMessage +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go new file mode 100644 index 000000000..6ecc2231a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go @@ -0,0 +1,95 @@ +//go:build clientlogmode +// +build clientlogmode + +package main + +import ( + "fmt" + "log" + "os" + "strings" + "text/template" +) + +var config = struct { + ModeBits []string +}{ + // Items should be appended only to keep bit-flag positions stable + ModeBits: []string{ + "Signing", + "Retries", + "Request", + "RequestWithBody", + "Response", + "ResponseWithBody", + "DeprecatedUsage", + "RequestEventMessage", + "ResponseEventMessage", + }, +} + +func bitName(name string) string { + return strings.ToUpper(name[:1]) + name[1:] +} + +var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ + "symbolName": func(name string) string { + return "Log" + bitName(name) + }, + "bitName": bitName, +}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
+const ( +{{- range $index, $field := .ModeBits }} + {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} +{{- end }} +) +{{ range $_, $field := .ModeBits }} +// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set +func (m ClientLogMode) Is{{- bitName $field }}() bool { + return m&{{- (symbolName $field) }} != 0 +} +{{ end }} +{{- range $_, $field := .ModeBits }} +// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit +func (m *ClientLogMode) Clear{{- bitName $field }}() { + *m &^= {{ (symbolName $field) }} +} +{{ end -}} +`)) + +func main() { + uniqueBitFields := make(map[string]struct{}) + + for _, bitName := range config.ModeBits { + if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { + panic(fmt.Sprintf("duplicate bit field: %s", bitName)) + } + uniqueBitFields[bitName] = struct{}{} + } + + file, err := os.Create("logging.go") + if err != nil { + log.Fatal(err) + } + defer file.Close() + + err = tmpl.Execute(file, config) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go new file mode 100644 index 000000000..e6e87ac77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -0,0 +1,180 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + + "github.com/aws/smithy-go/middleware" +) + +// RegisterServiceMetadata registers metadata about the service and operation into the middleware context +// so that it is available at runtime for other middleware to introspect. +type RegisterServiceMetadata struct { + ServiceID string + SigningName string + Region string + OperationName string +} + +// ID returns the middleware identifier. +func (s *RegisterServiceMetadata) ID() string { + return "RegisterServiceMetadata" +} + +// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. +func (s RegisterServiceMetadata) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) { + if len(s.ServiceID) > 0 { + ctx = SetServiceID(ctx, s.ServiceID) + } + if len(s.SigningName) > 0 { + ctx = SetSigningName(ctx, s.SigningName) + } + if len(s.Region) > 0 { + ctx = setRegion(ctx, s.Region) + } + if len(s.OperationName) > 0 { + ctx = setOperationName(ctx, s.OperationName) + } + return next.HandleInitialize(ctx, in) +} + +// service metadata keys for storing and lookup of runtime stack information. +type ( + serviceIDKey struct{} + signingNameKey struct{} + signingRegionKey struct{} + regionKey struct{} + operationNameKey struct{} + partitionIDKey struct{} +) + +// GetServiceID retrieves the service id from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetServiceID(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string) + return v +} + +// GetSigningName retrieves the service signing name from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
+func GetSigningName(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) + return v +} + +// GetSigningRegion retrieves the region from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetSigningRegion(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) + return v +} + +// GetRegion retrieves the endpoint region from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetRegion(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, regionKey{}).(string) + return v +} + +// GetOperationName retrieves the service operation metadata from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetOperationName(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string) + return v +} + +// GetPartitionID retrieves the endpoint partition id from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPartitionID(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string) + return v +} + +// SetSigningName set or modifies the signing name on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetSigningName(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, signingNameKey{}, value) +} + +// SetSigningRegion sets or modifies the region on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetSigningRegion(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, signingRegionKey{}, value) +} + +// SetServiceID sets the service id on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetServiceID(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, serviceIDKey{}, value) +} + +// setRegion sets the endpoint region on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRegion(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, regionKey{}, value) +} + +// setOperationName sets the service operation on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setOperationName(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, operationNameKey{}, value) +} + +// SetPartitionID sets the partition id of a resolved region on the context +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
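// An illustrative sketch of the exported setter/getter pairs above. Generated
// API clients register these values per operation via RegisterServiceMetadata;
// this sketch simply exercises the pair on a plain context, and the "Example"
// service ID is a made-up value.
package main

import (
	"context"
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
)

func main() {
	ctx := context.Background()
	ctx = awsmiddleware.SetServiceID(ctx, "Example")
	ctx = awsmiddleware.SetSigningName(ctx, "example")

	// Later middleware (retry, signing, logging) can introspect the values.
	fmt.Println(awsmiddleware.GetServiceID(ctx))   // Example
	fmt.Println(awsmiddleware.GetSigningName(ctx)) // example
}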
+func SetPartitionID(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, partitionIDKey{}, value) +} + +// EndpointSource key +type endpointSourceKey struct{} + +// GetEndpointSource returns an endpoint source if set on context +func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) { + v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource) + return v +} + +// SetEndpointSource sets endpoint source on context +func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context { + return middleware.WithStackValue(ctx, endpointSourceKey{}, value) +} + +type signingCredentialsKey struct{} + +// GetSigningCredentials returns the credentials that were used for signing if set on context. +func GetSigningCredentials(ctx context.Context) (v aws.Credentials) { + v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials) + return v +} + +// SetSigningCredentials sets the credentails used for signing on the context. +func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { + return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go new file mode 100644 index 000000000..9bd0dfb15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go @@ -0,0 +1,168 @@ +package middleware + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation +// invocation. +type ClientRequestID struct{} + +// ID the identifier for the ClientRequestID +func (r *ClientRequestID) ID() string { + return "ClientRequestID" +} + +// HandleBuild attaches a unique operation invocation id for the operation to the request +func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() + if err != nil { + return out, metadata, err + } + + const invocationIDHeader = "Amz-Sdk-Invocation-Id" + req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) + + return next.HandleBuild(ctx, in) +} + +// RecordResponseTiming records the response timing for the SDK client requests. 
+type RecordResponseTiming struct{} + +// ID is the middleware identifier +func (a *RecordResponseTiming) ID() string { + return "RecordResponseTiming" +} + +// HandleDeserialize calculates response metadata and clock skew +func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + responseAt := sdk.NowTime() + setResponseAt(&metadata, responseAt) + + var serverTime time.Time + + switch resp := out.RawResponse.(type) { + case *smithyhttp.Response: + respDateHeader := resp.Header.Get("Date") + if len(respDateHeader) == 0 { + break + } + var parseErr error + serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) + if parseErr != nil { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", + parseErr.Error()) + break + } + setServerTime(&metadata, serverTime) + } + + if !serverTime.IsZero() { + attemptSkew := serverTime.Sub(responseAt) + setAttemptSkew(&metadata, attemptSkew) + } + + return out, metadata, err +} + +type responseAtKey struct{} + +// GetResponseAt returns the time response was received at. +func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(responseAtKey{}).(time.Time) + return v, ok +} + +// setResponseAt sets the response time on the metadata. +func setResponseAt(metadata *middleware.Metadata, v time.Time) { + metadata.Set(responseAtKey{}, v) +} + +type serverTimeKey struct{} + +// GetServerTime returns the server time for response. +func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(serverTimeKey{}).(time.Time) + return v, ok +} + +// setServerTime sets the server time on the metadata. +func setServerTime(metadata *middleware.Metadata, v time.Time) { + metadata.Set(serverTimeKey{}, v) +} + +type attemptSkewKey struct{} + +// GetAttemptSkew returns Attempt clock skew for response from metadata. +func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { + v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) + return v, ok +} + +// setAttemptSkew sets the attempt clock skew on the metadata. +func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) { + metadata.Set(attemptSkewKey{}, v) +} + +// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack +func AddClientRequestIDMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&ClientRequestID{}, middleware.After) +} + +// AddRecordResponseTiming adds RecordResponseTiming middleware to the +// middleware stack. +func AddRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After) +} + +// rawResponseKey is the accessor key used to store and access the +// raw response within the response metadata. 
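// An illustrative sketch of consuming the timing values recorded by
// RecordResponseTiming. Generated operation outputs carry a ResultMetadata
// field of type middleware.Metadata that could be passed here; the logging
// choices are arbitrary.
package main

import (
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

// reportTiming logs the recorded response timing values, when present.
func reportTiming(md middleware.Metadata) {
	if at, ok := awsmiddleware.GetResponseAt(md); ok {
		log.Printf("response received at %s", at)
	}
	if skew, ok := awsmiddleware.GetAttemptSkew(md); ok {
		log.Printf("client/server clock skew: %s", skew)
	}
}

func main() {
	reportTiming(middleware.Metadata{}) // empty metadata: nothing logged
}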
+type rawResponseKey struct{} + +// addRawResponse middleware adds raw response on to the metadata +type addRawResponse struct{} + +// ID the identifier for the ClientRequestID +func (m *addRawResponse) ID() string { + return "AddRawResponseToMetadata" +} + +// HandleDeserialize adds raw response on the middleware metadata +func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + metadata.Set(rawResponseKey{}, out.RawResponse) + return out, metadata, err +} + +// AddRawResponseToMetadata adds middleware to the middleware stack that +// store raw response on to the metadata. +func AddRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&addRawResponse{}, middleware.Before) +} + +// GetRawResponse returns raw response set on metadata +func GetRawResponse(metadata middleware.Metadata) interface{} { + return metadata.Get(rawResponseKey{}) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go new file mode 100644 index 000000000..ba262dadc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + os = "macos" + case "ios": + os = "ios" + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go new file mode 100644 index 000000000..e14a1e4ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go @@ -0,0 +1,24 @@ +//go:build !go1.16 +// +build !go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64 + // For now declare this as "other" until we have a better detection mechanism. + fallthrough + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go new file mode 100644 index 000000000..dd3391fe4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go @@ -0,0 +1,27 @@ +package middleware + +import ( + "github.com/aws/smithy-go/middleware" +) + +// requestIDKey is used to retrieve request id from response metadata +type requestIDKey struct{} + +// SetRequestIDMetadata sets the provided request id over middleware metadata +func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { + metadata.Set(requestIDKey{}, id) +} + +// GetRequestIDMetadata retrieves the request id from middleware metadata +// returns string and bool indicating value of request id, whether request id was set. 
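// An illustrative round trip through the request id metadata accessors below;
// in practice the retriever middleware sets the id and callers read it from an
// operation output's ResultMetadata. The id string here is a fake value.
package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	var md middleware.Metadata
	awsmiddleware.SetRequestIDMetadata(&md, "0123456789ABCDEF") // fake id

	if id, ok := awsmiddleware.GetRequestIDMetadata(md); ok {
		fmt.Println("request id:", id)
	}
}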
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { + if !metadata.Has(requestIDKey{}) { + return "", false + } + + v, ok := metadata.Get(requestIDKey{}).(string) + if !ok { + return "", true + } + return v, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go new file mode 100644 index 000000000..7ce48c611 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -0,0 +1,49 @@ +package middleware + +import ( + "context" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddRequestIDRetrieverMiddleware adds request id retriever middleware +func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before operation deserializers so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before) +} + +type requestIDRetriever struct { +} + +// ID returns the middleware identifier +func (m *requestIDRetriever) ID() string { + return "RequestIDRetriever" +} + +func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. + return out, metadata, err + } + + // Different header which can map to request id + requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} + + for _, h := range requestIDHeaderList { + // check for headers known to contain Request id + if v := resp.Header.Get(h); len(v) != 0 { + // set reqID on metadata for successful responses. + SetRequestIDMetadata(&metadata, v) + break + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go new file mode 100644 index 000000000..d5adfec90 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -0,0 +1,241 @@ +package middleware + +import ( + "context" + "fmt" + "os" + "runtime" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var languageVersion = strings.TrimPrefix(runtime.Version(), "go") + +// SDKAgentKeyType is the metadata type to add to the SDK agent string +type SDKAgentKeyType int + +// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will +// be mapped to AdditionalMetadata. 
+const ( + _ SDKAgentKeyType = iota + APIMetadata + OperatingSystemMetadata + LanguageMetadata + EnvironmentMetadata + FeatureMetadata + ConfigMetadata + FrameworkMetadata + AdditionalMetadata + ApplicationIdentifier +) + +func (k SDKAgentKeyType) string() string { + switch k { + case APIMetadata: + return "api" + case OperatingSystemMetadata: + return "os" + case LanguageMetadata: + return "lang" + case EnvironmentMetadata: + return "exec-env" + case FeatureMetadata: + return "ft" + case ConfigMetadata: + return "cfg" + case FrameworkMetadata: + return "lib" + case ApplicationIdentifier: + return "app" + case AdditionalMetadata: + fallthrough + default: + return "md" + } +} + +const execEnvVar = `AWS_EXECUTION_ENV` + +// requestUserAgent is a build middleware that set the User-Agent for the request. +type requestUserAgent struct { + sdkAgent, userAgent *smithyhttp.UserAgentBuilder +} + +// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// request. +// +// User-Agent example: +// aws-sdk-go-v2/1.2.3 +// +// X-Amz-User-Agent example: +// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 +func newRequestUserAgent() *requestUserAgent { + userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() + addProductName(userAgent) + addProductName(sdkAgent) + + r := &requestUserAgent{ + sdkAgent: sdkAgent, + userAgent: userAgent, + } + + addSDKMetadata(r) + + return r +} + +func addSDKMetadata(r *requestUserAgent) { + r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) + r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) + if ev := os.Getenv(execEnvVar); len(ev) > 0 { + r.AddSDKAgentKey(EnvironmentMetadata, ev) + } +} + +func addProductName(builder *smithyhttp.UserAgentBuilder) { + builder.AddKeyValue(aws.SDKName, aws.SDKVersion) +} + +// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKey(key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKey(key) + return nil + } +} + +// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKeyValue(key, value) + return nil + } +} + +// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKey(keyType, key) + return nil + } +} + +// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. 
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) + return nil + } +} + +// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. +func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { + _, err := getOrAddRequestUserAgent(stack) + return err +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) { + id := (*requestUserAgent)(nil).ID() + bm, ok := stack.Build.Get(id) + if !ok { + bm = newRequestUserAgent() + err := stack.Build.Add(bm, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := bm.(*requestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) + } + + return requestUserAgent, nil +} + +// AddUserAgentKey adds the component identified by name to the User-Agent string. +func (u *requestUserAgent) AddUserAgentKey(key string) { + u.userAgent.AddKey(key) +} + +// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. +func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) { + u.userAgent.AddKeyValue(key, value) +} + +// AddUserAgentKey adds the component identified by name to the User-Agent string. +func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { + // TODO: should target sdkAgent + u.userAgent.AddKey(keyType.string() + "/" + key) +} + +// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. +func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { + // TODO: should target sdkAgent + u.userAgent.AddKeyValue(keyType.string()+"/"+key, value) +} + +// ID the name of the middleware. +func (u *requestUserAgent) ID() string { + return "UserAgent" +} + +// HandleBuild adds or appends the constructed user agent to the request. 
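// An illustrative sketch of the user agent helpers above, assuming they are
// applied to a bare smithy middleware stack; generated clients typically wire
// them in through APIOptions. The "myapp/1.2.3" product token is made up.
package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Ensure the UserAgent build middleware exists, then append a custom
	// product token such as "myapp/1.2.3" to the User-Agent header.
	if err := awsmiddleware.AddRequestUserAgentMiddleware(stack); err != nil {
		panic(err)
	}
	if err := awsmiddleware.AddUserAgentKeyValue("myapp", "1.2.3")(stack); err != nil {
		panic(err)
	}

	_, registered := stack.Build.Get("UserAgent")
	fmt.Println("UserAgent middleware registered:", registered) // true
}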
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + switch req := in.Request.(type) { + case *smithyhttp.Request: + u.addHTTPUserAgent(req) + // TODO: To be re-enabled + // u.addHTTPSDKAgent(req) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + return next.HandleBuild(ctx, in) +} + +func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { + const userAgent = "User-Agent" + updateHTTPHeader(request, userAgent, u.userAgent.Build()) +} + +func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { + const sdkAgent = "X-Amz-User-Agent" + updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) +} + +func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { + var current string + if v := request.Header[header]; len(v) > 0 { + current = v[0] + } + if len(current) > 0 { + current = value + " " + current + } else { + current = value + } + request.Header[header] = append(request.Header[header][:0], current) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go new file mode 100644 index 000000000..77dd4d8db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -0,0 +1,61 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Array represents the encoding of Query lists and sets. A Query array is a +// representation of a list of values of a fixed type. A serialized array might +// look like the following: +// +// ListName.member.1=foo +// &ListName.member.2=bar +// &Listname.member.3=baz +type Array struct { + // The query values to add the array to. + values url.Values + // The array's prefix, which includes the names of all parent structures + // and ends with the name of the list. For example, the prefix might be + // "ParentStructure.ListName". This prefix will be used to form the full + // keys for each element in the list. For example, an entry might have the + // key "ParentStructure.ListName.member.MemberName.1". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the list is flat or not. A list that is not flat will produce the + // following entry to the url.Values for a given entry: + // ListName.MemberName.1=value + // A list that is flat will produce the following: + // ListName.1=value + flat bool + // The location name of the member. In most cases this should be "member". + memberName string + // Elements are stored in values, so we keep track of the list size here. + size int32 +} + +func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { + return &Array{ + values: values, + prefix: prefix, + flat: flat, + memberName: memberName, + } +} + +// Value adds a new element to the Query Array. Returns a Value type used to +// encode the array element. 
+func (a *Array) Value() Value { + // Query lists start a 1, so adjust the size first + a.size++ + prefix := a.prefix + if !a.flat { + prefix = fmt.Sprintf("%s.%s", prefix, a.memberName) + } + // Lists can't have flat members + return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go new file mode 100644 index 000000000..2ecf9241c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go @@ -0,0 +1,80 @@ +package query + +import ( + "io" + "net/url" + "sort" +) + +// Encoder is a Query encoder that supports construction of Query body +// values using methods. +type Encoder struct { + // The query values that will be built up to manage encoding. + values url.Values + // The writer that the encoded body will be written to. + writer io.Writer + Value +} + +// NewEncoder returns a new Query body encoder +func NewEncoder(writer io.Writer) *Encoder { + values := url.Values{} + return &Encoder{ + values: values, + writer: writer, + Value: newBaseValue(values), + } +} + +// Encode returns the []byte slice representing the current +// state of the Query encoder. +func (e Encoder) Encode() error { + ws, ok := e.writer.(interface{ WriteString(string) (int, error) }) + if !ok { + // Fall back to less optimal byte slice casting if WriteString isn't available. + ws = &wrapWriteString{writer: e.writer} + } + + // Get the keys and sort them to have a stable output + keys := make([]string, 0, len(e.values)) + for k := range e.values { + keys = append(keys, k) + } + sort.Strings(keys) + isFirstEntry := true + for _, key := range keys { + queryValues := e.values[key] + escapedKey := url.QueryEscape(key) + for _, value := range queryValues { + if !isFirstEntry { + if _, err := ws.WriteString(`&`); err != nil { + return err + } + } else { + isFirstEntry = false + } + if _, err := ws.WriteString(escapedKey); err != nil { + return err + } + if _, err := ws.WriteString(`=`); err != nil { + return err + } + if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { + return err + } + } + } + return nil +} + +// wrapWriteString wraps an io.Writer to provide a WriteString method +// where one is not available. +type wrapWriteString struct { + writer io.Writer +} + +// WriteString writes a string to the wrapped writer by casting it to +// a byte array first. +func (w wrapWriteString) WriteString(v string) (int, error) { + return w.writer.Write([]byte(v)) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go new file mode 100644 index 000000000..ab91e357b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go @@ -0,0 +1,78 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Map represents the encoding of Query maps. A Query map is a representation +// of a mapping of arbitrary string keys to arbitrary values of a fixed type. +// A Map differs from an Object in that the set of keys is not fixed, in that +// the values must all be of the same type, and that map entries are ordered. +// A serialized map might look like the following: +// +// MapName.entry.1.key=Foo +// &MapName.entry.1.value=spam +// &MapName.entry.2.key=Bar +// &MapName.entry.2.value=eggs +type Map struct { + // The query values to add the map to. 
+ values url.Values + // The map's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.MapName". This prefix will be used to form the full + // keys for each key-value pair of the map. For example, a value might have + // the key "ParentStructure.MapName.1.value". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the map is flat or not. A map that is not flat will produce the + // following entries to the url.Values for a given key-value pair: + // MapName.entry.1.KeyLocationName=mykey + // MapName.entry.1.ValueLocationName=myvalue + // A map that is flat will produce the following: + // MapName.1.KeyLocationName=mykey + // MapName.1.ValueLocationName=myvalue + flat bool + // The location name of the key. In most cases this should be "key". + keyLocationName string + // The location name of the value. In most cases this should be "value". + valueLocationName string + // Elements are stored in values, so we keep track of the list size here. + size int32 +} + +func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { + return &Map{ + values: values, + prefix: prefix, + flat: flat, + keyLocationName: keyLocationName, + valueLocationName: valueLocationName, + } +} + +// Key adds the given named key to the Query map. +// Returns a Value encoder that should be used to encode a Query value type. +func (m *Map) Key(name string) Value { + // Query lists start a 1, so adjust the size first + m.size++ + var key string + var value string + if m.flat { + key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) + } else { + key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) + } + + // The key can only be a string, so we just go ahead and set it here + newValue(m.values, key, false).String(name) + + // Maps can't have flat members + return newValue(m.values, value, false) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go new file mode 100644 index 000000000..360344791 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go @@ -0,0 +1,62 @@ +package query + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the +// operation serializer that will convert the query request body to a GET +// operation with the query message in the HTTP request querystring. 
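// An illustrative sketch of the encoder, object, and array types above
// working together; the "Action" and "Names" parameter names are invented
// for the example.
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var body bytes.Buffer
	enc := query.NewEncoder(&body)

	obj := enc.Object()
	obj.Key("Action").String("ListWidgets")

	names := obj.Key("Names").Array("member")
	names.Value().String("first")
	names.Value().String("second")

	if err := enc.Encode(); err != nil {
		panic(err)
	}
	fmt.Println(body.String())
	// Action=ListWidgets&Names.member.1=first&Names.member.2=second
}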
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) +} + +type asGetRequest struct{} + +func (*asGetRequest) ID() string { return "Query:AsGetRequest" } + +func (m *asGetRequest) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) + } + + req.Method = "GET" + + // If the stream is not set, nothing else to do. + stream := req.GetStream() + if stream == nil { + return next.HandleSerialize(ctx, input) + } + + // Clear the stream since there will not be any body. + req.Header.Del("Content-Type") + req, err = req.SetStream(nil) + if err != nil { + return out, metadata, fmt.Errorf("unable update request body %w", err) + } + input.Request = req + + // Update request query with the body's query string value. + delim := "" + if len(req.URL.RawQuery) != 0 { + delim = "&" + } + + b, err := ioutil.ReadAll(stream) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request body %w", err) + } + req.URL.RawQuery += delim + string(b) + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go new file mode 100644 index 000000000..debb413de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -0,0 +1,56 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Object represents the encoding of Query structures and unions. A Query +// object is a representation of a mapping of string keys to arbitrary +// values where there is a fixed set of keys whose values each have their +// own known type. A serialized object might look like the following: +// +// ObjectName.Foo=value +// &ObjectName.Bar=5 +type Object struct { + // The query values to add the object to. + values url.Values + // The object's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.ObjectName". This prefix will be used to form the full + // keys for each member of the object. For example, a member might have the + // key "ParentStructure.ObjectName.MemberName". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string +} + +func newObject(values url.Values, prefix string) *Object { + return &Object{ + values: values, + prefix: prefix, + } +} + +// Key adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. +func (o *Object) Key(name string) Value { + return o.key(name, false) +} + +// FlatKey adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. The +// value will be flattened if it is a map or array. 
+func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 000000000..302525ab1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,106 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. + values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. +func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. 
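// A companion sketch for the Map encoder described above; the "Attributes"
// map name and its entries are invented. Non-flat maps serialize as
// MapName.entry.N.key / MapName.entry.N.value pairs.
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var body bytes.Buffer
	enc := query.NewEncoder(&body)

	attrs := enc.Object().Key("Attributes").Map("key", "value")
	attrs.Key("Color").String("blue")
	attrs.Key("Size").String("large")

	if err := enc.Encode(); err != nil {
		panic(err)
	}
	fmt.Println(body.String())
	// Attributes.entry.1.key=Color&Attributes.entry.1.value=blue&
	// Attributes.entry.2.key=Size&Attributes.entry.2.value=large
}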
+func (qv Value) Base64EncodeBytes(v []byte) {
+	qv.queryValue.Blob(v)
+}
+
+// Boolean encodes v as a query string value
+func (qv Value) Boolean(v bool) {
+	qv.queryValue.Boolean(v)
+}
+
+// String encodes v as a query string value
+func (qv Value) String(v string) {
+	qv.queryValue.String(v)
+}
+
+// Byte encodes v as a query string value
+func (qv Value) Byte(v int8) {
+	qv.queryValue.Byte(v)
+}
+
+// Short encodes v as a query string value
+func (qv Value) Short(v int16) {
+	qv.queryValue.Short(v)
+}
+
+// Integer encodes v as a query string value
+func (qv Value) Integer(v int32) {
+	qv.queryValue.Integer(v)
+}
+
+// Long encodes v as a query string value
+func (qv Value) Long(v int64) {
+	qv.queryValue.Long(v)
+}
+
+// Float encodes v as a query string value
+func (qv Value) Float(v float32) {
+	qv.queryValue.Float(v)
+}
+
+// Double encodes v as a query string value
+func (qv Value) Double(v float64) {
+	qv.queryValue.Double(v)
+}
+
+// BigInteger encodes v as a query string value
+func (qv Value) BigInteger(v *big.Int) {
+	qv.queryValue.BigInteger(v)
+}
+
+// BigDecimal encodes v as a query string value
+func (qv Value) BigDecimal(v *big.Float) {
+	qv.queryValue.BigDecimal(v)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
new file mode 100644
index 000000000..c228f7d87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -0,0 +1,56 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code      string
+	Message   string
+	RequestID string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:      errResponse.Code,
+			Message:   errResponse.Message,
+			RequestID: errResponse.RequestID,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:      errResponse.Code,
+		Message:   errResponse.Message,
+		RequestID: errResponse.RequestID,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping.
+type noWrappedErrorResponse struct {
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+// wrappedErrorResponse represents the error response body wrapped
+// within an <Error></Error> element.
+type wrappedErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 000000000..974ef594f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+	"sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+	remainingTokens uint
+	maxCapacity     uint
+	minCapacity     uint
+	mu              sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+	return &TokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens that remain in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 000000000..12a3f0c4f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,87 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
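+//
+// A minimal editorial sketch of intended usage (hypothetical values, not part
+// of the upstream source): GetToken deducts from the bucket and the returned
+// release function refunds the same amount:
+//
+//	limiter := NewTokenRateLimit(100)
+//	release, err := limiter.GetToken(context.Background(), 5)
+//	if err == nil {
+//		_ = release() // refunds the 5 tokens back to the bucket
+//	}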
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+func isTimeoutError(error) bool {
+	return false
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the decremented value cannot be
+// reduced from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 000000000..d8d00e615
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestCanceledError) Unwrap() error {
+	return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+	return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
new file mode 100644
index 000000000..b9fce01d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
@@ -0,0 +1,156 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	// DefaultRequestCost is the cost of a single request from the adaptive
+	// rate limited token bucket.
+	DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+	ThrottleErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
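+//
+// For example (editorial sketch, not part of the upstream source), failing
+// fast instead of sleeping when the adaptive token bucket is empty:
+//
+//	retryer := NewAdaptiveMode(func(o *AdaptiveModeOptions) {
+//		o.FailOnNoAttemptTokens = true
+//	})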
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty, AdaptiveMode will sleep until a
+	// token is available before an attempt is made. This can occur when
+	// attempts fail with throttle errors. Use this option to disable the sleep
+	// until a token is available, and return an error immediately.
+	FailOnNoAttemptTokens bool
+
+	// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+	RequestCost uint
+
+	// Set of strategies to determine if the attempt failed due to a throttle
+	// error.
+	//
+	// It is safe to append to this list in NewAdaptiveMode's functional options.
+	Throttles []IsErrorThrottle
+
+	// Set of options for standard retry mode that AdaptiveMode is built on top
+	// of. AdaptiveMode may apply its own defaults to Standard retry mode that
+	// are different than the defaults of NewStandard. Use these options to
+	// override the default options.
+	StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits. The attempt rate
+// limit is initially unrestricted, but becomes restricted when the attempt
+// fails with a throttle error. When restricted, AdaptiveMode may need to
+// sleep before an attempt is made, if too many throttles have been received.
+// AdaptiveMode's sleep can be canceled with context cancel. Set
+// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep,
+// to fail fast.
+//
+// The unrestricted attempt rate limit will eventually be restored once
+// attempts are no longer failing due to throttle errors.
+type AdaptiveMode struct {
+	options   AdaptiveModeOptions
+	throttles IsErrorThrottles
+
+	retryer   aws.RetryerV2
+	rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+	o := AdaptiveModeOptions{
+		RequestCost: DefaultRequestCost,
+		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
+	}
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AdaptiveMode{
+		options:   o,
+		throttles: IsErrorThrottles(o.Throttles),
+		retryer:   NewStandard(o.StandardOptions...),
+		rateLimit: newAdaptiveRateLimit(),
+	}
+}
+
+// IsErrorRetryable returns if the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an attempt before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return an error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and to
+// release the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+	for {
+		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+		if acquiredToken {
+			break
+		}
+		if a.options.FailOnNoAttemptTokens {
+			return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens enabled")
+		}
+
+		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+		}
+	}
+
+	return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+	throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+	a.rateLimit.Update(throttled)
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
new file mode 100644
index 000000000..ad96d9b8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
@@ -0,0 +1,158 @@
+package retry
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+	tokenBucketEnabled bool
+
+	smooth        float64
+	beta          float64
+	scaleConstant float64
+	minFillRate   float64
+
+	fillRate         float64
+	calculatedRate   float64
+	lastRefilled     time.Time
+	measuredTxRate   float64
+	lastTxRateBucket float64
+	requestCount     int64
+	lastMaxRate      float64
+	lastThrottleTime time.Time
+	timeWindow       float64
+
+	tokenBucket *adaptiveTokenBucket
+
+	mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+	now := sdk.NowTime()
+	return &adaptiveRateLimit{
+		smooth:        0.8,
+		beta:          0.7,
+		scaleConstant: 0.4,
+
+		minFillRate: 0.5,
+
+		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+		lastThrottleTime: now,
+
+		tokenBucket: newAdaptiveTokenBucket(0),
+	}
+}
+
+func (a *adaptiveRateLimit) Enable(v bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.tokenBucketEnabled = v
+}
+
+func (a *adaptiveRateLimit) AcquireToken(amount uint) (
+	tokenAcquired bool, waitTryAgain time.Duration,
+) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if !a.tokenBucketEnabled {
+		return true, 0
+	}
+
+	a.tokenBucketRefill()
+
+	available, ok := a.tokenBucket.Retrieve(float64(amount))
+	if !ok {
+		waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
+		return false, waitDur
+	}
+
+	return true, 0
+}
+
+func (a *adaptiveRateLimit) Update(throttled bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.updateMeasuredRate()
+
+	if throttled {
+		rateToUse := a.measuredTxRate
+		if a.tokenBucketEnabled {
+			rateToUse = math.Min(a.measuredTxRate, a.fillRate)
+		}
+
+		a.lastMaxRate = rateToUse
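+		// (editorial note) This follows a CUBIC-style congestion pattern:
+		// on a throttle the limiter remembers the last achieved rate and
+		// cuts the calculated rate multiplicatively (rate * beta); later
+		// successes regrow the rate cubically toward, then past, that rate.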
+ a.calculateTimeWindow() + a.lastThrottleTime = sdk.NowTime() + a.calculatedRate = a.cubicThrottle(rateToUse) + a.tokenBucketEnabled = true + } else { + a.calculateTimeWindow() + a.calculatedRate = a.cubicSuccess(sdk.NowTime()) + } + + newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) + a.tokenBucketUpdateRate(newRate) +} + +func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { + dt := secondsFloat64(t.Sub(a.lastThrottleTime)) + return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate +} + +func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { + return rateToUse * a.beta +} + +func (a *adaptiveRateLimit) calculateTimeWindow() { + a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) +} + +func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { + a.tokenBucketRefill() + a.fillRate = math.Max(newRPS, a.minFillRate) + a.tokenBucket.Resize(newRPS) +} + +func (a *adaptiveRateLimit) updateMeasuredRate() { + now := sdk.NowTime() + timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. + a.requestCount++ + + if timeBucket > a.lastTxRateBucket { + currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) + a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) + a.requestCount = 0 + a.lastTxRateBucket = timeBucket + } +} + +func (a *adaptiveRateLimit) tokenBucketRefill() { + now := sdk.NowTime() + if a.lastRefilled.IsZero() { + a.lastRefilled = now + return + } + + fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate + a.tokenBucket.Refund(fillAmount) + a.lastRefilled = now +} + +func float64Seconds(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +func secondsFloat64(v time.Duration) float64 { + return float64(v) / float64(time.Second) +} + +func timeFloat64Seconds(v time.Time) float64 { + return float64(v.UnixNano()) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go new file mode 100644 index 000000000..052723e8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go @@ -0,0 +1,83 @@ +package retry + +import ( + "math" + "sync" +) + +// adaptiveTokenBucket provides a concurrency safe utility for adding and +// removing tokens from the available token bucket. +type adaptiveTokenBucket struct { + remainingTokens float64 + maxCapacity float64 + minCapacity float64 + mu sync.Mutex +} + +// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the +// capacity specified. +func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { + return &adaptiveTokenBucket{ + remainingTokens: i, + maxCapacity: i, + minCapacity: 1, + } +} + +// Retrieve attempts to reduce the available tokens by the amount requested. If +// there are tokens available true will be returned along with the number of +// available tokens remaining. If amount requested is larger than the available +// capacity, false will be returned along with the available capacity. If the +// amount is less than the available capacity, the capacity will be reduced by +// that amount, and the remaining capacity and true will be returned. 
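+//
+// Editorial sketch of the semantics above (hypothetical values):
+//
+//	b := newAdaptiveTokenBucket(4)
+//	remaining, ok := b.Retrieve(3) // remaining == 1, ok == true
+//	remaining, ok = b.Retrieve(3)  // remaining == 1, ok == false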
+func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *adaptiveTokenBucket) Refund(amount float64) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *adaptiveTokenBucket) Capacity() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens that remain in the bucket.
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 000000000..42ced06e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// Retryer Interface and Implementations
+//
+// This package defines the Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+// - Retrying errors that implement the RetryableError method, and return true.
+// - Connection Errors
+// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+// - Connection Reset Errors.
+// - net.OpErr types that are dialing errors or are temporary.
+// - HTTP Status Codes: 500, 502, 503, and 504.
+// - API Error Codes
+// - RequestTimeout, RequestTimeoutException
+// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+// RequestThrottled, SlowDown, EC2ThrottledException
+// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+// - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example, to modify the default retry attempts for the standard retryer:
+//
+//	// configure the custom retryer
+//	customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+//		o.MaxAttempts = 5
+//	})
+//
+//	// create a service client with the retryer
+//	s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.Retryer = customRetry
+//	})
+//
+// Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
+// way. These are:
+//
+// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
+// in addition to those considered retryable by the provided retryer.
+//
+// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
+// a retryer implementation.
+//
+// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
+// request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+// you can use this method to easily create custom back off policies to be used with the
+// standard retryer.
+//
+// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be retried.
+//
+// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy the IsErrorTimeout interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be considered a timeout.
+package retry
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 000000000..3e432eefe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts
+// has been exceeded.
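+//
+// Callers can detect it with errors.As (editorial sketch, hypothetical
+// variable names):
+//
+//	var maxErr *retry.MaxAttemptsError
+//	if errors.As(err, &maxErr) {
+//		log.Printf("gave up after %d attempts: %v", maxErr.Attempt, maxErr.Err)
+//	}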
+type MaxAttemptsError struct {
+	Attempt int
+	Err     error
+}
+
+func (e *MaxAttemptsError) Error() string {
+	return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
+}
+
+// Unwrap returns the nested error causing the max attempts error. Provides the
+// implementation for errors.Is and errors.As to unwrap nested errors.
+func (e *MaxAttemptsError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
new file mode 100644
index 000000000..c266996de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
@@ -0,0 +1,49 @@
+package retry
+
+import (
+	"math"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
+// ExponentialJitterBackoff provides backoff delays with jitter based on the
+// number of attempts.
+type ExponentialJitterBackoff struct {
+	maxBackoff time.Duration
+	// precomputed number of attempts needed to reach max backoff.
+	maxBackoffAttempts float64
+
+	randFloat64 func() (float64, error)
+}
+
+// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
+// for the max backoff.
+func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
+	return &ExponentialJitterBackoff{
+		maxBackoff: maxBackoff,
+		maxBackoffAttempts: math.Log2(
+			float64(maxBackoff) / float64(time.Second)),
+		randFloat64: rand.CryptoRandFloat64,
+	}
+}
+
+// BackoffDelay returns the duration to wait before the next attempt should be
+// made. Returns an error if unable to get a duration.
+func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	if attempt > int(j.maxBackoffAttempts) {
+		return j.maxBackoff, nil
+	}
+
+	b, err := j.randFloat64()
+	if err != nil {
+		return 0, err
+	}
+
+	// [0.0, 1.0) * 2 ^ attempts
+	ri := int64(1 << uint64(attempt))
+	delaySeconds := b * float64(ri)
+
+	return timeconv.FloatSecondsDur(delaySeconds), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
new file mode 100644
index 000000000..7a3f18301
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// attemptResultsKey is a metadata accessor key to retrieve metadata
+// for all request attempts.
+type attemptResultsKey struct {
+}
+
+// GetAttemptResults retrieves attempts results from middleware metadata.
+func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
+	m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
+	return m, ok
+}
+
+// AttemptResults represents a struct containing metadata returned by all request attempts.
+type AttemptResults struct {
+
+	// Results is a slice consisting of attempt results from all request attempts.
+	// Results are stored in the order the request attempts were made.
+	Results []AttemptResult
+}
+
+// AttemptResult represents the attempt result returned by a single request attempt.
+type AttemptResult struct {
+
+	// Err is the error, if any, received for the request attempt.
+	Err error
+
+	// Retryable denotes if the request may be retried. This states if an
+	// error is considered retryable.
+	Retryable bool
+
+	// Retried indicates if this request was retried.
+	Retried bool
+
+	// ResponseMetadata is any existing metadata passed via the response middlewares.
+	ResponseMetadata middleware.Metadata
+}
+
+// addAttemptResults adds attempt results to middleware metadata
+func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
+	metadata.Set(attemptResultsKey{}, v)
+}
+
+// GetRawResponse returns the raw response recorded for the attempt result
+func (a AttemptResult) GetRawResponse() interface{} {
+	return awsmiddle.GetRawResponse(a.ResponseMetadata)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
new file mode 100644
index 000000000..926f5f5e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -0,0 +1,331 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithymiddle "github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+)
+
+// RequestCloner is a function that can take an input request type and clone
+// the request for use in a subsequent retry attempt.
+type RequestCloner func(interface{}) interface{}
+
+type retryMetadata struct {
+	AttemptNum       int
+	AttemptTime      time.Time
+	MaxAttempts      int
+	AttemptClockSkew time.Duration
+}
+
+// Attempt is a Smithy Finalize middleware that handles retry attempts using
+// the provided Retryer implementation.
+type Attempt struct {
+	// Enable the logging of retry attempts performed by the SDK. This will
+	// include logging retry attempts, unretryable errors, and when max
+	// attempts are reached.
+	LogAttempts bool
+
+	retryer       aws.RetryerV2
+	requestCloner RequestCloner
+}
+
+// NewAttemptMiddleware returns a new Attempt retry middleware.
+func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
+	m := &Attempt{
+		retryer:       wrapAsRetryerV2(retryer),
+		requestCloner: requestCloner,
+	}
+	for _, fn := range optFns {
+		fn(m)
+	}
+	return m
+}
+
+// ID returns the middleware identifier
+func (r *Attempt) ID() string { return "Retry" }
+
+func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
+	if !r.LogAttempts {
+		return
+	}
+	logger.Logf(classification, format, v...)
+}
+
+// HandleFinalize utilizes the provided Retryer implementation to attempt
+// retries over the next handler
+func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	var attemptNum int
+	var attemptClockSkew time.Duration
+	var attemptResults AttemptResults
+
+	maxAttempts := r.retryer.MaxAttempts()
+	releaseRetryToken := nopRelease
+
+	for {
+		attemptNum++
+		attemptInput := in
+		attemptInput.Request = r.requestCloner(attemptInput.Request)
+
+		// Record the metadata for the attempt being started.
+		attemptCtx := setRetryMetadata(ctx, retryMetadata{
+			AttemptNum:       attemptNum,
+			AttemptTime:      sdk.NowTime().UTC(),
+			MaxAttempts:      maxAttempts,
+			AttemptClockSkew: attemptClockSkew,
+		})
+
+		var attemptResult AttemptResult
+		out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+		attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
+
+		// AttemptResult Retried states that the attempt was not successful, and
+		// should be retried.
+		shouldRetry := attemptResult.Retried
+
+		// Add attempt metadata to list of all attempt metadata
+		attemptResults.Results = append(attemptResults.Results, attemptResult)
+
+		if !shouldRetry {
+			// Ensure the last response's metadata is used as the basis for result
+			// metadata returned by the stack. The slice of attempt results
+			// will be added to this cloned metadata.
+			metadata = attemptResult.ResponseMetadata.Clone()
+
+			break
+		}
+	}
+
+	addAttemptResults(&metadata, attemptResults)
+	return out, metadata, err
+}
+
+// handleAttempt handles an individual request attempt.
+func (r *Attempt) handleAttempt(
+	ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
+) (
+	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
+) {
+	defer func() {
+		attemptResult.Err = err
+	}()
+
+	// Short circuit if this attempt can never succeed because the context is
+	// canceled. This reduces the chance of token pools being modified for
+	// attempts that will not be made
+	select {
+	case <-ctx.Done():
+		return out, attemptResult, nopRelease, ctx.Err()
+	default:
+	}
+
+	//------------------------------
+	// Get Attempt Token
+	//------------------------------
+	releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
+	if err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to get retry Send token, %w", err)
+	}
+
+	//------------------------------
+	// Send Attempt
+	//------------------------------
+	logger := smithymiddle.GetLogger(ctx)
+	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
+	retryMetadata, _ := getRetryMetadata(ctx)
+	attemptNum := retryMetadata.AttemptNum
+	maxAttempts := retryMetadata.MaxAttempts
+
+	// Following attempts must ensure the request payload stream starts in a
+	// rewound state.
+	if attemptNum > 1 {
+		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
+			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
+				return out, attemptResult, nopRelease, fmt.Errorf(
+					"failed to rewind transport stream for retry, %w", rewindErr)
+			}
+		}
+
+		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
+			service, operation, attemptNum)
+	}
+
+	var metadata smithymiddle.Metadata
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	attemptResult.ResponseMetadata = metadata
+
+	//------------------------------
+	// Bookkeeping
+	//------------------------------
+	// Release the retry token based on the state of the attempt's error (if any).
+	if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release retry token after request error, %w", err)
+	}
+	// Release the attempt token based on the state of the attempt's error (if any).
+	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release initial token after request error, %w", err)
+	}
+	// If there was no error making the attempt, nothing further to do. There
+	// will be nothing to retry.
+	if err == nil {
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Is Retryable and Should Retry
+	//------------------------------
+	// If the attempt failed with an unretryable error, nothing further to do
+	// but return, and inform the caller about the terminal failure.
+	retryable := r.retryer.IsErrorRetryable(err)
+	if !retryable {
+		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+		return out, attemptResult, nopRelease, err
+	}
+
+	// set retryable to true
+	attemptResult.Retryable = true
+
+	// Once the maximum number of attempts has been exhausted there is nothing
+	// further to do other than inform the caller about the terminal failure.
+	if maxAttempts > 0 && attemptNum >= maxAttempts {
+		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+		err = &MaxAttemptsError{
+			Attempt: attemptNum,
+			Err:     err,
+		}
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Get Retry (aka Retry Quota) Token
+	//------------------------------
+	// Get a retry token that will be released after the attempt has been made.
+	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+	if retryTokenErr != nil {
+		return out, attemptResult, nopRelease, retryTokenErr
+	}
+
+	//------------------------------
+	// Retry Delay and Sleep
+	//------------------------------
+	// Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+	// context.
+	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	if reqErr != nil {
+		return out, attemptResult, releaseRetryToken, reqErr
+	}
+	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+		err = &aws.RequestCanceledError{Err: reqErr}
+		return out, attemptResult, releaseRetryToken, err
+	}
+
+	// The request should be re-attempted.
+	attemptResult.Retried = true
+
+	return out, attemptResult, releaseRetryToken, err
+}
+
+// MetricsHeader attaches SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+	return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	retryMetadata, _ := getRetryMetadata(ctx)
+
+	const retryMetricHeader = "Amz-Sdk-Request"
+	var parts []string
+
+	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+	if retryMetadata.MaxAttempts != 0 {
+		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+	}
+
+	var ttl time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		ttl = deadline
+	}
+
+	// Only append the TTL if it can be determined.
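+	// (editorial note) The resulting header value looks like, e.g.,
+	// "attempt=2; max=3; ttl=20220315T120000Z" (hypothetical values).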
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { + const unixTimeFormat = "20060102T150405Z" + ttl = ttl.Add(retryMetadata.AttemptClockSkew) + parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) + } + + switch req := in.Request.(type) { + case *http.Request: + req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + return next.HandleFinalize(ctx, in) +} + +type retryMetadataKey struct{} + +// getRetryMetadata retrieves retryMetadata from the context and a bool +// indicating if it was set. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { + metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + return metadata, ok +} + +// setRetryMetadata sets the retryMetadata on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { + return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata) +} + +// AddRetryMiddlewaresOptions is the set of options that can be passed to +// AddRetryMiddlewares for configuring retry associated middleware. +type AddRetryMiddlewaresOptions struct { + Retryer aws.Retryer + + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogRetryAttempts bool +} + +// AddRetryMiddlewares adds retry middleware to operation middleware stack +func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { + attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { + middleware.LogAttempts = options.LogRetryAttempts + }) + + if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil { + return err + } + if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go new file mode 100644 index 000000000..af81635b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go @@ -0,0 +1,90 @@ +package retry + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// AddWithErrorCodes returns a Retryer with additional error codes considered +// for determining if the error should be retried. +func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { + retryable := &RetryableErrorCode{ + Codes: map[string]struct{}{}, + } + for _, c := range codes { + retryable.Codes[c] = struct{}{} + } + + return &withIsErrorRetryable{ + RetryerV2: wrapAsRetryerV2(r), + Retryable: retryable, + } +} + +type withIsErrorRetryable struct { + aws.RetryerV2 + Retryable IsErrorRetryable +} + +func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { + if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { + return v.Bool() + } + return r.RetryerV2.IsErrorRetryable(err) +} + +// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value +// specified. 
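+//
+// For example (editorial sketch, hypothetical configuration), capping a
+// standard retryer at five attempts:
+//
+//	retryer := retry.AddWithMaxAttempts(retry.NewStandard(), 5)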
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer,
+// overriding the RetryDelay behavior with an exponential jitter backoff
+// capped at the specified maximum delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 000000000..c695e6fe5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,186 @@
+package retry
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error as the result of an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns that the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ CanceledError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	if v.CanceledError() {
+		return aws.FalseTernary
+	}
+	return aws.UnknownTernary
+}
+
+// RetryableConnectionError determines if the underlying error is an HTTP
+// connection error, and returns if it should be retried.
+//
+// Includes errors such as connection reset, connection refused, net dial,
+// temporary, and timeout errors.
+type RetryableConnectionError struct{}
+
+// IsErrorRetryable returns if the error is caused by an HTTP connection
+// error, and should be retried.
+func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
+	if err == nil {
+		return aws.UnknownTernary
+	}
+	var retryable bool
+
+	var conErr interface{ ConnectionError() bool }
+	var tempErr interface{ Temporary() bool }
+	var timeoutErr interface{ Timeout() bool }
+	var urlErr *url.Error
+	var netOpErr *net.OpError
+
+	switch {
+	case errors.As(err, &conErr) && conErr.ConnectionError():
+		retryable = true
+
+	case strings.Contains(err.Error(), "connection reset"):
+		retryable = true
+
+	case errors.As(err, &urlErr):
+		// Refused connections should be retried as the service may not yet be
+		// running on the port. Go TCP dial considers refused connections as
+		// not temporary.
+		if strings.Contains(urlErr.Error(), "connection refused") {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(urlErr))
+		}
+
+	case errors.As(err, &netOpErr):
+		// Network dial, or temporary network errors are always retryable.
+		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
+		}
+
+	case errors.As(err, &tempErr) && tempErr.Temporary():
+		// Fallback to the generic temporary check, with temporary errors
+		// retryable.
+		retryable = true
+
+	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
+		// Fallback to the generic timeout check, with timeout errors
+		// retryable.
+		retryable = true
+
+	default:
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(retryable)
+}
+
+// RetryableHTTPStatusCode provides an IsErrorRetryable based on HTTP status
+// codes.
+type RetryableHTTPStatusCode struct {
+	Codes map[int]struct{}
+}
+
+// IsErrorRetryable returns if the passed in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ HTTPStatusCode() int }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.HTTPStatusCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorRetryable returns if the error is retryable based on the error codes.
+// Returns unknown if the error doesn't have a code, or the code is unknown.
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
new file mode 100644
index 000000000..25abffc81
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -0,0 +1,258 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay to wait
+// before retrying a request attempt that previously failed.
+type BackoffDelayer interface {
+	BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay to wait before retrying a request attempt.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	return fn(attempt, err)
+}
+
+const (
+	// DefaultMaxAttempts is the maximum number of attempts for an API request
+	DefaultMaxAttempts int = 3
+
+	// DefaultMaxBackoff is the maximum backoff delay between attempts
+	DefaultMaxBackoff time.Duration = 20 * time.Second
+)
+
+// Default retry token quota values.
+const (
+	DefaultRetryRateTokens  uint = 500
+	DefaultRetryCost        uint = 5
+	DefaultRetryTimeoutCost uint = 10
+	DefaultNoRetryIncrement uint = 1
+)
+
+// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
+// should consider as retryable errors.
+var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
+	500: {},
+	502: {},
+	503: {},
+	504: {},
+}
+
+// DefaultRetryableErrorCodes provides the set of API error codes that should
+// be retried.
+var DefaultRetryableErrorCodes = map[string]struct{}{
+	"RequestTimeout":          {},
+	"RequestTimeoutException": {},
+}
+
+// DefaultThrottleErrorCodes provides the set of API error codes that are
+// considered throttle errors.
+var DefaultThrottleErrorCodes = map[string]struct{}{
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"ThrottledException":                     {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {},
+	"ProvisionedThroughputExceededException": {},
+	"TransactionInProgressException":         {},
+	"RequestLimitExceeded":                   {},
+	"BandwidthLimitExceeded":                 {},
+	"LimitExceededException":                 {},
+	"RequestThrottled":                       {},
+	"SlowDown":                               {},
+	"PriorRequestNotComplete":                {},
+	"EC2ThrottledException":                  {},
+}
+
+// DefaultRetryables provides the set of retryable checks that are used by
+// default.
+var DefaultRetryables = []IsErrorRetryable{
+	NoRetryCanceledError{},
+	RetryableError{},
+	RetryableConnectionError{},
+	RetryableHTTPStatusCode{
+		Codes: DefaultRetryableHTTPStatusCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultRetryableErrorCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// DefaultTimeouts provides the set of timeout checks that are used by default.
+var DefaultTimeouts = []IsErrorTimeout{
+	TimeouterError{},
+}
+
+// StandardOptions provides the functional options for configuring the standard
+// retryable, and delay behavior.
+type StandardOptions struct {
+	// Maximum number of attempts that should be made.
+	MaxAttempts int
+
+	// MaxBackoff duration between retried attempts.
+	MaxBackoff time.Duration
+
+	// Provides the backoff strategy the retryer will use to determine the
+	// delay between retry attempts.
+	Backoff BackoffDelayer
+
+	// Set of strategies to determine if the attempt should be retried based on
+	// the error response received.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Retryables []IsErrorRetryable
+
+	// Set of strategies to determine if the attempt failed due to a timeout
+	// error.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Timeouts []IsErrorTimeout
+
+	// Provides the rate limiting strategy for rate limiting attempt retries
+	// across all attempts the retryer is being used with.
+	RateLimiter RateLimiter
+
+	// The cost to deduct from the RateLimiter's token bucket per retry.
+	RetryCost uint
+
+	// The cost to deduct from the RateLimiter's token bucket per retry caused
+	// by timeout error.
+	RetryTimeoutCost uint
+
+	// The cost to payback to the RateLimiter's token bucket for successful
+	// attempts.
+	NoRetryIncrement uint
+}
+
+// RateLimiter provides the interface for limiting the rate of attempt retries
+// allowed by the retryer.
+type RateLimiter interface {
+	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
+	AddTokens(uint) error
+}
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine if the failed attempt should be retried, and
+// what retry delay should be used.
+type Standard struct {
+	options StandardOptions
+
+	timeout   IsErrorTimeout
+	retryable IsErrorRetryable
+	backoff   BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+	o := StandardOptions{
+		MaxAttempts: DefaultMaxAttempts,
+		MaxBackoff:  DefaultMaxBackoff,
+		Retryables:  append([]IsErrorRetryable{}, DefaultRetryables...),
+		Timeouts:    append([]IsErrorTimeout{}, DefaultTimeouts...),
+
+		RateLimiter:      ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+		RetryCost:        DefaultRetryCost,
+		RetryTimeoutCost: DefaultRetryTimeoutCost,
+		NoRetryIncrement: DefaultNoRetryIncrement,
+	}
+	for _, fn := range fnOpts {
+		fn(&o)
+	}
+	if o.MaxAttempts <= 0 {
+		o.MaxAttempts = DefaultMaxAttempts
+	}
+
+	backoff := o.Backoff
+	if backoff == nil {
+		backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+	}
+
+	return &Standard{
+		options:   o,
+		backoff:   backoff,
+		retryable: IsErrorRetryables(o.Retryables),
+		timeout:   IsErrorTimeouts(o.Timeouts),
+	}
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for a
+// request before failing.
+func (s *Standard) MaxAttempts() int {
+	return s.options.MaxAttempts
+}
+
+// IsErrorRetryable returns if the error can be retried or not. Should not
+// consider the number of attempts made.
+func (s *Standard) IsErrorRetryable(err error) bool {
+	return s.retryable.IsErrorRetryable(err).Bool()
+}
+
+// RetryDelay returns the delay to use before another request attempt is made.
+func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return s.backoff.BackoffDelay(attempt, err)
+}
+
+// GetAttemptToken returns the token to be released after the attempt completes.
+// The release token will add NoRetryIncrement to the RateLimiter token pool if
+// the attempt was successful. If the attempt failed, nothing will be done.
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
+	return s.GetInitialToken(), nil
+}
+
+// GetInitialToken returns a token for adding the NoRetryIncrement to the
+// RateLimiter token if the attempt completed successfully without error.
+//
+// InitialToken applies to the result of each attempt, including the first,
+// whereas the RetryToken applies to the result of subsequent attempts.
+//
+// Deprecated: use GetAttemptToken instead.
+func (s *Standard) GetInitialToken() func(error) error {
+	return releaseToken(s.noRetryIncrement).release
+}
+
+func (s *Standard) noRetryIncrement() error {
+	return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
+	cost := s.options.RetryCost
+
+	if s.timeout.IsErrorTimeout(opErr).Bool() {
+		cost = s.options.RetryTimeoutCost
+	}
+
+	fn, err := s.options.RateLimiter.GetToken(ctx, cost)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get rate limit token, %w", err)
+	}
+
+	return releaseToken(fn).release, nil
+}
+
+func nopRelease(error) error { return nil }
+
+type releaseToken func() error
+
+func (f releaseToken) release(err error) error {
+	if err != nil {
+		return nil
+	}
+
+	return f()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
new file mode 100644
index 000000000..c4b844d15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
@@ -0,0 +1,60 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorThrottle provides the interface of an implementation to determine if
+// an error response from an operation is a throttling error.
+type IsErrorThrottle interface {
+	IsErrorThrottle(error) aws.Ternary
+}
+
+// IsErrorThrottles is a collection of checks to determine if the error is a
+// throttle error. Iterates through the checks and returns the state of
+// throttle if any check returns something other than unknown.
+type IsErrorThrottles []IsErrorThrottle
+
+// IsErrorThrottle returns if the error is a throttle error if any of the
+// checks in the list return a value other than unknown.
+func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface.
+type IsErrorThrottleFunc func(error) aws.Ternary
+
+// IsErrorThrottle returns if the error is a throttle error.
+func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
+	return fn(err)
+}
+
+// ThrottleErrorCode determines if an attempt should be retried based on the
+// API error code.
+type ThrottleErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorThrottle returns if the error is a throttle error based on the error
+// codes. Returns unknown if the error doesn't have a code, or it is unknown.
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 000000000..3d47870d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// an error is a timeout error.
+type IsErrorTimeout interface {
+	IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is a
+// timeout. Iterates through the checks and returns the timeout state if any
+// check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns if the error is a timeout if any of the checks in
+// the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+	for _, t := range ts {
+		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns if the error is a timeout.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+	return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on type with the Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+	var v interface{ Timeout() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 000000000..1e378f86a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// RetryMode provides the mode the API client will use when creating a
+// retryer.
+type RetryMode string
+
+const (
+	// RetryModeStandard model provides rate limited retry attempts with
+	// exponential backoff delay.
+	RetryModeStandard RetryMode = "standard"
+
+	// RetryModeAdaptive model provides attempt send rate limiting on throttle
+	// responses in addition to standard mode's retry rate limiting.
+	//
+	// Adaptive retry mode is experimental and is subject to change in the
+	// future.
+	RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returns an error if the value is not a known RetryMode.
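+//
+// For example (editorial sketch):
+//
+//	mode, err := aws.ParseRetryMode("adaptive") // RetryModeAdaptive, nil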
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returns an error if the value is not a known RetryMode.
+func ParseRetryMode(v string) (mode RetryMode, err error) {
+	switch v {
+	case "standard":
+		return RetryModeStandard, nil
+	case "adaptive":
+		return RetryModeAdaptive, nil
+	default:
+		return mode, fmt.Errorf("unknown RetryMode, %v", v)
+	}
+}
+
+func (m RetryMode) String() string { return string(m) }
+
+// Retryer is an interface to determine if a given error from an
+// attempt should be retried, and if so what backoff delay to apply. The
+// default implementation used by most services is the retry package's Standard
+// type, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	// IsErrorRetryable returns if the failed attempt is retryable. This check
+	// should determine if the error can be retried, or if the error is
+	// terminal.
+	IsErrorRetryable(error) bool
+
+	// MaxAttempts returns the maximum number of attempts that can be made for
+	// an attempt before failing. A value of 0 implies that the attempt should
+	// be retried until it succeeds if the errors are retryable.
+	MaxAttempts() int
+
+	// RetryDelay returns the delay that should be used before retrying the
+	// attempt. Returns an error if the delay could not be determined.
+	RetryDelay(attempt int, opErr error) (time.Duration, error)
+
+	// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+	// Returning the token release function, or error.
+	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	GetInitialToken() (releaseToken func(error) error)
+}
+
+// RetryerV2 is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type,
+// which contains basic retry logic using exponential backoff.
+//
+// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
+// method in favor of GetAttemptToken which takes a context, and can return an error.
+//
+// The SDK's retry package's Attempt middleware, and utilities will always
+// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if
+// GetAttemptToken is not implemented.
+type RetryerV2 interface {
+	Retryer
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	//
+	// Deprecated: This method does not provide a way to block using Context,
+	// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+	GetInitialToken() (releaseToken func(error) error)
+
+	// GetAttemptToken returns the send token that can be used to rate limit
+	// attempt calls. Will be used by the SDK's retry package's Attempt
+	// middleware to get a send token prior to making the attempt, and to
+	// release the send token after the attempt has been made.
+	GetAttemptToken(context.Context) (func(error) error, error)
+}
+
+// NopRetryer provides a Retryer implementation that will flag
+// all attempt errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Always returns an error.
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} + +// GetRetryToken returns a stub function that does nothing. +func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { + return nopReleaseToken, nil +} + +// GetInitialToken returns a stub function that does nothing. +func (NopRetryer) GetInitialToken() func(error) error { + return nopReleaseToken +} + +// GetAttemptToken returns a stub function that does nothing. +func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { + return nopReleaseToken, nil +} + +func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go new file mode 100644 index 000000000..3af9b2b33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go @@ -0,0 +1,14 @@ +package aws + +// ExecutionEnvironmentID is the AWS execution environment runtime identifier. +type ExecutionEnvironmentID string + +// RuntimeEnvironment is a collection of values that are determined at runtime +// based on the environment that the SDK is executing in. Some of these values +// may or may not be present based on the executing environment and certain SDK +// configuration properties that drive whether these values are populated.. +type RuntimeEnvironment struct { + EnvironmentIdentifier ExecutionEnvironmentID + Region string + EC2InstanceMetadataRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go new file mode 100644 index 000000000..cbf22f1d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go @@ -0,0 +1,115 @@ +package v4 + +import ( + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +func lookupKey(service, region string) string { + var s strings.Builder + s.Grow(len(region) + len(service) + 3) + s.WriteString(region) + s.WriteRune('/') + s.WriteString(service) + return s.String() +} + +type derivedKey struct { + AccessKey string + Date time.Time + Credential []byte +} + +type derivedKeyCache struct { + values map[string]derivedKey + mutex sync.RWMutex +} + +func newDerivedKeyCache() derivedKeyCache { + return derivedKeyCache{ + values: make(map[string]derivedKey), + } +} + +func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { + key := lookupKey(service, region) + s.mutex.RLock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.RUnlock() + return cred + } + s.mutex.RUnlock() + + s.mutex.Lock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.Unlock() + return cred + } + cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) + entry := derivedKey{ + AccessKey: credentials.AccessKeyID, + Date: signingTime.Time, + Credential: cred, + } + s.values[key] = entry + s.mutex.Unlock() + + return cred +} + +func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { + cacheEntry, ok := s.retrieveFromCache(key) + if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { + return cacheEntry.Credential, true + } + return nil, false +} + +func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { + if v, ok := s.values[key]; ok { + return v, 
true + } + return derivedKey{}, false +} + +// SigningKeyDeriver derives a signing key from a set of credentials +type SigningKeyDeriver struct { + cache derivedKeyCache +} + +// NewSigningKeyDeriver returns a new SigningKeyDeriver +func NewSigningKeyDeriver() *SigningKeyDeriver { + return &SigningKeyDeriver{ + cache: newDerivedKeyCache(), + } +} + +// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. +func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { + return k.cache.Get(credential, service, region, signingTime) +} + +func deriveKey(secret, service, region string, t SigningTime) []byte { + hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) + hmacRegion := HMACSHA256(hmacDate, []byte(region)) + hmacService := HMACSHA256(hmacRegion, []byte(service)) + return HMACSHA256(hmacService, []byte("aws4_request")) +} + +func isSameDay(x, y time.Time) bool { + xYear, xMonth, xDay := x.Date() + yYear, yMonth, yDay := y.Date() + + if xYear != yYear { + return false + } + + if xMonth != yMonth { + return false + } + + return xDay == yDay +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go new file mode 100644 index 000000000..a23cb003b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go @@ -0,0 +1,40 @@ +package v4 + +// Signature Version 4 (SigV4) Constants +const ( + // EmptyStringSHA256 is the hex encoded sha256 value of an empty string + EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + + // UnsignedPayload indicates that the request payload body is unsigned + UnsignedPayload = "UNSIGNED-PAYLOAD" + + // AmzAlgorithmKey indicates the signing algorithm + AmzAlgorithmKey = "X-Amz-Algorithm" + + // AmzSecurityTokenKey indicates the security token to be used with temporary credentials + AmzSecurityTokenKey = "X-Amz-Security-Token" + + // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' + AmzDateKey = "X-Amz-Date" + + // AmzCredentialKey is the access key ID and credential scope + AmzCredentialKey = "X-Amz-Credential" + + // AmzSignedHeadersKey is the set of headers signed for the request + AmzSignedHeadersKey = "X-Amz-SignedHeaders" + + // AmzSignatureKey is the query parameter to store the SigV4 signature + AmzSignatureKey = "X-Amz-Signature" + + // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter + TimeFormat = "20060102T150405Z" + + // ShortTimeFormat is the shorten time format used in the credential scope + ShortTimeFormat = "20060102" + + // ContentSHAKey is the SHA256 of request body + ContentSHAKey = "X-Amz-Content-Sha256" + + // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
+	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 000000000..c61955ad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for the validation of a
+// string value.
+type Rules []Rule
+
+// Rule is the interface a validation rule implements. It simply
+// checks whether or not a value adheres to that Rule.
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and return true if any rule
+// applies to the value. Nested rules are supported.
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule is a generic Rule for maps.
+type MapRule map[string]struct{}
+
+// IsValid for MapRule reports whether the value exists in the map.
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for allow listing.
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList.
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing.
+type ExcludeList struct {
+	Rule
+}
+
+// IsValid for ExcludeList checks that the value is not within the wrapped Rule.
+func (b ExcludeList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against.
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns true if a
+// prefix match has been found.
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules allows rules to depend on one another.
+type InclusiveRules []Rule
+
+// IsValid will return true only if all rules are true.
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 000000000..85a1d8f03
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,68 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	ExcludeList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+		},
+	},
+}
+
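These rules compose; InclusiveRules in particular requires every member rule to pass. A small sketch, written as if inside this internal package (it is not importable from outside the SDK); the rule values are illustrative and mirror how AllowedQueryHoisting is built below:

// hoistable is valid only for headers that start with "X-Amz-" and are
// not present in the exclusion map.
var hoistable = InclusiveRules{
	ExcludeList{MapRule{"X-Amz-Content-Sha256": struct{}{}}},
	Patterns{"X-Amz-"},
}

func exampleHoisting() {
	_ = hoistable.IsValid("X-Amz-Meta-Foo")       // true
	_ = hoistable.IsValid("X-Amz-Content-Sha256") // false: excluded
	_ = hoistable.IsValid("Content-Type")         // false: no X-Amz- prefix
}

+// RequiredSignedHeaders is an allow list for building canonical headers.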
+var RequiredSignedHeaders = Rules{ + AllowList{ + MapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + "X-Amz-Tagging": struct{}{}, + }, + }, + Patterns{"X-Amz-Object-Lock-"}, + Patterns{"X-Amz-Meta-"}, +} + +// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value +// represents whether or not it is a pattern. +var AllowedQueryHoisting = InclusiveRules{ + ExcludeList{RequiredSignedHeaders}, + Patterns{"X-Amz-"}, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go new file mode 100644 index 000000000..e7fa7a1b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go @@ -0,0 +1,13 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. +func HMACSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go new file mode 100644 index 000000000..bf93659a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go @@ -0,0 +1,75 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. 
IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go new file mode 100644 index 000000000..fc7887909 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go @@ -0,0 +1,13 @@ +package v4 + +import "strings" + +// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope +func BuildCredentialScope(signingTime SigningTime, region, service string) string { + return strings.Join([]string{ + signingTime.ShortTimeFormat(), + region, + service, + "aws4_request", + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go new file mode 100644 index 000000000..1de06a765 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted of 20060102. +func (m *SigningTime) ShortTimeFormat() string { + return m.format(&m.shortTimeFormat, ShortTimeFormat) +} + +func (m *SigningTime) format(target *string, format string) string { + if len(*target) > 0 { + return *target + } + v := m.Time.Format(format) + *target = v + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go new file mode 100644 index 000000000..0cb9cffaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go @@ -0,0 +1,64 @@ +package v4 + +import ( + "net/url" + "strings" +) + +const doubleSpace = " " + +// StripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. 
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
new file mode 100644
index 000000000..3f3bcf456
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -0,0 +1,400 @@
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const computePayloadHashMiddlewareID = "ComputePayloadHash"
+
+// HashComputationError indicates an error occurred while computing the signing hash
+type HashComputationError struct {
+	Err error
+}
+
+// Error is the error message
+func (e *HashComputationError) Error() string {
+	return fmt.Sprintf("failed to compute payload hash: %v", e.Err)
+}
+
+// Unwrap returns the underlying error if one is set
+func (e *HashComputationError) Unwrap() error {
+	return e.Err
+}
+
+// SigningError indicates an error condition occurred while performing SigV4 signing
+type SigningError struct {
+	Err error
+}
+
+func (e *SigningError) Error() string {
+	return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+	return e.Err
+}
+
+// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that
+// switches between unsigned and signed payload based on the TLS state of the request.
+// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth.
+// By default, the SDK uses this middleware for known AWS APIs that support such TLS-based auth selection.
+//
+// Usage example:
+// The S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to
+// dynamically switch between unsigned and signed payload based on the TLS state of the request.
+func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{})
+	return err
+}
+
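A sketch of wiring this into a smithy-go middleware stack; this standalone setup is hypothetical, since normally a generated service client registers these steps itself. Swap fails unless the compute-payload step is already present, so it is added first:

package main

import (
	"log"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Register the default payload-hash computation on the Build step.
	if err := v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		log.Fatal(err)
	}

	// Replace it with the TLS-aware resolver described above.
	if err := v4.UseDynamicPayloadSigningMiddleware(stack); err != nil {
		log.Fatal(err)
	}
}

+// dynamicPayloadSigningMiddleware dynamically resolves the middleware that
+// computes and sets the payload SHA256.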
+type dynamicPayloadSigningMiddleware struct { +} + +// ID returns the resolver identifier +func (m *dynamicPayloadSigningMiddleware) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets a resolver that directs to the payload sha256 compute handler. +func (m *dynamicPayloadSigningMiddleware) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + // if TLS is enabled, use unsigned payload when supported + if strings.EqualFold(req.URL.Scheme, "https") { + return (&unsignedPayload{}).HandleBuild(ctx, in, next) + } + + // else fall back to signed payload + return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) +} + +// unsignedPayload sets the SigV4 request payload hash to unsigned. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. +type unsignedPayload struct{} + +// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation +// middleware stack +func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&unsignedPayload{}, middleware.After) +} + +// ID returns the unsignedPayload identifier +func (m *unsignedPayload) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets the payload hash to be an unsigned payload +func (m *unsignedPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call). + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.UnsignedPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + return next.HandleBuild(ctx, in) +} + +// computePayloadSHA256 computes SHA256 payload hash to sign. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. 
+type computePayloadSHA256 struct{} + +// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the +// operation middleware stack +func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { + return stack.Build.Add(&computePayloadSHA256{}, middleware.After) +} + +// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the +// operation middleware stack +func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + return err +} + +// ID is the middleware name +func (m *computePayloadSHA256) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild compute the payload hash for the request payload +func (m *computePayloadSHA256) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call) + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { + return next.HandleBuild(ctx, in) + } + + hash := sha256.New() + if stream := req.GetStream(); stream != nil { + _, err = io.Copy(hash, stream) + if err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to compute payload hash, %w", err), + } + } + + if err := req.RewindStream(); err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to seek body to start, %w", err), + } + } + } + + ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + + return next.HandleBuild(ctx, in) +} + +// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the +// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. +// +// Use this to disable computing the Payload SHA256 checksum and instead use +// UNSIGNED-PAYLOAD for the SHA256 value. +func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + return err +} + +// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// the Payload hash stored in the context. +type contentSHA256Header struct{} + +// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the +// operation middleware stack +func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) +} + +// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware +// from the operation middleware stack +func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + return err +} + +// ID returns the ContentSHA256HeaderMiddleware identifier +func (m *contentSHA256Header) ID() string { + return "SigV4ContentSHA256Header" +} + +// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// stored in the context. 
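
The hash these middlewares store in the context is the hex-encoded SHA-256 of the request body. A small self-contained sketch of that format; the helper name is illustrative:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hexPayloadHash mirrors what computePayloadSHA256 stores in the context:
// the hex-encoded SHA-256 of the request body.
func hexPayloadHash(body []byte) string {
	sum := sha256.Sum256(body)
	return hex.EncodeToString(sum[:])
}

func main() {
	// An empty body yields the well-known constant:
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	fmt.Println(hexPayloadHash(nil))
}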
+func (m *contentSHA256Header) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) + + return next.HandleBuild(ctx, in) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Signer HTTPSigner + LogSigning bool +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +type SignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + signer HTTPSigner + logSigning bool +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + signer: options.Signer, + logSigning: options.LogSigning, + } +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if !haveCredentialProvider(s.credentialsProvider) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} + } + + ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) + + return next.HandleFinalize(ctx, in) +} + +type streamingEventsPayload struct{} + +// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. 
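
Constructing the Finalize-step signing middleware defined above; a sketch with static credentials, assuming only the exported API of this package (NewSigner appears later in this file):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	creds := aws.CredentialsProviderFunc(func(context.Context) (aws.Credentials, error) {
		return aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
	})

	// The middleware retrieves credentials and signs each request on Finalize.
	mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
		CredentialsProvider: creds,
		Signer:              v4.NewSigner(),
		LogSigning:          false,
	})
	_ = mw // typically added to a stack's Finalize step by the service client
}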
+func AddStreamingEventsPayload(stack *middleware.Stack) error {
+	return stack.Build.Add(&streamingEventsPayload{}, middleware.After)
+}
+
+func (s *streamingEventsPayload) ID() string {
+	return computePayloadHashMiddlewareID
+}
+
+func (s *streamingEventsPayload) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	contentSHA := GetPayloadHash(ctx)
+	if len(contentSHA) == 0 {
+		contentSHA = v4Internal.StreamingEventsPayload
+	}
+
+	ctx = SetPayloadHash(ctx, contentSHA)
+
+	return next.HandleBuild(ctx, in)
+}
+
+// GetSignedRequestSignature attempts to extract the signature of the request.
+// Returns an error if the request is unsigned, or the signature cannot be
+// extracted.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+	const authHeaderSignatureElem = "Signature="
+
+	if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+		ps := strings.Split(auth, ", ")
+		for _, p := range ps {
+			if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+				sig := p[len(authHeaderSignatureElem):]
+				if len(sig) == 0 {
+					return nil, fmt.Errorf("invalid request signature authorization header")
+				}
+				return hex.DecodeString(sig)
+			}
+		}
+	}
+
+	if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+		return hex.DecodeString(sig)
+	}
+
+	return nil, fmt.Errorf("request not signed")
+}
+
+func haveCredentialProvider(p aws.CredentialsProvider) bool {
+	if p == nil {
+		return false
+	}
+	switch p.(type) {
+	case aws.AnonymousCredentials,
+		*aws.AnonymousCredentials:
+		return false
+	}
+
+	return true
+}
+
+type payloadHashKey struct{}
+
+// GetPayloadHash retrieves the payload hash to use for signing
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPayloadHash(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string)
+	return v
+}
+
+// SetPayloadHash sets the payload hash to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPayloadHash(ctx context.Context, hash string) context.Context {
+	return middleware.WithStackValue(ctx, payloadHashKey{}, hash)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
new file mode 100644
index 000000000..e1a066512
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
@@ -0,0 +1,127 @@
+package v4
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4 signer that can create a
+// presigned URL for an HTTP request.
+type HTTPPresigner interface {
+	PresignHTTP(
+		ctx context.Context, credentials aws.Credentials, r *http.Request,
+		payloadHash string, service string, region string, signingTime time.Time,
+		optFns ...func(*SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
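Extracting the signature back out of an already signed request with the GetSignedRequestSignature helper above; the Authorization header value here is a made-up, truncated example:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"net/http"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.amazonaws.com/", nil)
	req.Header.Set("Authorization",
		"AWS4-HMAC-SHA256 Credential=AKID/20220315/us-east-1/s3/aws4_request, "+
			"SignedHeaders=host;x-amz-date, Signature=deadbeef")

	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(sig)) // deadbeef
}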
+// PresignedHTTPRequest provides the URL and signed headers that are included
+// in the presigned URL.
+type PresignedHTTPRequest struct {
+	URL          string
+	Method       string
+	SignedHeader http.Header
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider aws.CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request, the middleware short
+// circuits the stack and returns the PresignedHTTPRequest as the result,
+// instead of forwarding the request to the next Finalize handler.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !haveCredentialProvider(s.credentialsProvider) {
+		out.Result = &PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.Retrieve(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
new file mode 100644
index 000000000..66aa2bd6a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
@@ -0,0 +1,86 @@
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"strings"
+	"time"
+)
+
+// EventStreamSigner is an AWS
 EventStream protocol signer.
+type EventStreamSigner interface {
+	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
+}
+
+// StreamSignerOptions is the configuration options for StreamSigner.
+type StreamSignerOptions struct{}
+
+// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
+type StreamSigner struct {
+	options StreamSignerOptions
+
+	credentials aws.Credentials
+	service     string
+	region      string
+
+	prevSignature []byte
+
+	signingKeyDeriver *v4Internal.SigningKeyDeriver
+}
+
+// NewStreamSigner returns a new AWS EventStream protocol signer.
+func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
+	o := StreamSignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &StreamSigner{
+		options:           o,
+		credentials:       credentials,
+		service:           service,
+		region:            region,
+		signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
+		prevSignature:     seedSignature,
+	}
+}
+
+// GetSignature signs the provided header and payload bytes.
+func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	prevSignature := s.prevSignature
+
+	st := v4Internal.NewSigningTime(signingTime)
+
+	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
+
+	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
+
+	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
+
+	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
+	s.prevSignature = signature
+
+	return signature, nil
+}
+
+func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
+	hash := sha256.New()
+	return strings.Join([]string{
+		"AWS4-HMAC-SHA256-PAYLOAD",
+		signingTime.TimeFormat(),
+		credentialScope,
+		hex.EncodeToString(previousSignature),
+		hex.EncodeToString(makeHash(hash, headers)),
+		hex.EncodeToString(makeHash(hash, payload)),
+	}, "\n")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 000000000..06ba7773a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,542 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional logic.
+// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
+// additional escaping you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// When used, the URL.Opaque field must be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method, using the returned value.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing it; this will help
+// prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	signingAlgorithm    = "AWS4-HMAC-SHA256"
+	authorizationHeader = "Authorization"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// The logger to send log messages to.
+	Logger logging.Logger
+
+	// Enable logging of signed requests.
+	// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+	// presigned URL.
+	LogSigning bool
+}
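
A standalone signing sketch using the HTTPSigner shape above; NewSigner and SignHTTP are defined later in this file, and the credentials, bucket, and region are placeholders. The payload hash is the documented hex SHA-256 of an empty body:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

const emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://examplebucket.s3.amazonaws.com/key", nil)
	if err != nil {
		log.Fatal(err)
	}

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}

	// Signs the request in place, adding X-Amz-Date and Authorization.
	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.TODO(), creds, req, emptyPayloadHash,
		"s3", "us-east-1", time.Now().UTC()); err != nil {
		log.Fatal(err)
	}

	log.Println(req.Header.Get("Authorization"))
}

+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.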
+type Signer struct { + options SignerOptions + keyDerivator keyDerivator +} + +// NewSigner returns a new SigV4 Signer +func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { + options := SignerOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} +} + +type httpSigner struct { + Request *http.Request + ServiceName string + Region string + Time v4Internal.SigningTime + Credentials aws.Credentials + KeyDerivator keyDerivator + IsPreSign bool + + PayloadHash string + + DisableHeaderHoisting bool + DisableURIPathEscaping bool +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope + if s.IsPreSign { + query.Set(v4Internal.AmzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + var urlValues url.Values + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) + } + + var rawQuery strings.Builder + rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery.String(), + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery.WriteString("&X-Amz-Signature=") + rawQuery.WriteString(signingSignature) + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery.String() + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + 2 + + len(signedHeaders) + len(signedHeadersStr) + 2 + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + 
parts.WriteString(signingSignature) + return parts.String() +} + +// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The passed in request will be modified in place. +func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r, + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return err + } + + logSigningInfo(ctx, options, &signedRequest, false) + + return nil +} + +// PresignHTTP signs AWS v4 requests with the payload hash, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns the signed URL and the map of HTTP headers that were included in the +// signature or an error if signing the request failed. For presigned requests +// these headers and their values must be included on the HTTP request when it +// is made. This is helpful to know what header values need to be shared with +// the party the presigned request will be distributed to. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// PresignHTTP differs from SignHTTP in that it will sign the request using +// query string instead of header values. 
 This allows you to share the
+// Presigned Request's URL with third parties, or distribute it throughout your
+// system with minimal dependencies.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request, add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+//
+//	expires := 20 * time.Minute
+//	query := req.URL.Query()
+//	query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
+//	req.URL.RawQuery = query.Encode()
+//
+// This method does not modify the provided request.
+func (s *Signer) PresignHTTP(
+	ctx context.Context, credentials aws.Credentials, r *http.Request,
+	payloadHash string, service string, region string, signingTime time.Time,
+	optFns ...func(*SignerOptions),
+) (signedURI string, signedHeaders http.Header, err error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r.Clone(r.Context()),
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		Region:                 region,
+		Credentials:            credentials,
+		Time:                   v4Internal.NewSigningTime(signingTime.UTC()),
+		IsPreSign:              true,
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+		KeyDerivator:           s.keyDerivator,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return "", nil, err
+	}
+
+	logSigningInfo(ctx, options, &signedRequest, true)
+
+	signedHeaders = make(http.Header)
+
+	// For the signed headers we canonicalize the header keys in the returned map.
+	// This avoids situations where the standard library can double headers like
+	// the Host header. For example, the standard library will set the Host
+	// header even if it is present in lower-case form.
+	for k, v := range signedRequest.SignedHeaders {
+		key := textproto.CanonicalMIMEHeaderKey(k)
+		signedHeaders[key] = append(signedHeaders[key], v...)
+	}
+
+	return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+	return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
+		}
+	}
+
+	return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+	signed = make(http.Header)
+
+	var headers []string
+	const hostHeader = "host"
+	headers = append(headers, hostHeader)
+	signed[hostHeader] = append(signed[hostHeader], host)
+
+	if length > 0 {
+		const contentLengthHeader = "content-length"
+		headers = append(headers, contentLengthHeader)
+		signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+	}
+
+	for k, v := range header {
+		if !rule.IsValid(k) {
+			continue // ignored header
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := signed[lowerCaseKey]; ok {
+			// include additional values
+			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+ continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. + values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.TimeFormat(), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) + return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.TimeFormat() + + if s.IsPreSign { + query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) + if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + query.Set("X-Amz-Security-Token", sessionToken) + } + + query.Set(v4Internal.AmzDateKey, amzDate) + return + } + + headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) + + if len(s.Credentials.SessionToken) > 0 { + headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) + } +} + +func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go new file mode 
100644 index 000000000..f3fc4d610 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go @@ -0,0 +1,297 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return ptr.Bool(v) +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + return ptr.BoolSlice(vs) +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + return ptr.BoolMap(vs) +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return ptr.Byte(v) +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + return ptr.ByteSlice(vs) +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + return ptr.ByteMap(vs) +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return ptr.String(v) +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + return ptr.StringSlice(vs) +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + return ptr.StringMap(vs) +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return ptr.Int(v) +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + return ptr.IntSlice(vs) +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + return ptr.IntMap(vs) +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return ptr.Int8(v) +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + return ptr.Int8Slice(vs) +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + return ptr.Int8Map(vs) +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return ptr.Int16(v) +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + return ptr.Int16Slice(vs) +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + return ptr.Int16Map(vs) +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return ptr.Int32(v) +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + return ptr.Int32Slice(vs) +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + return ptr.Int32Map(vs) +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return ptr.Int64(v) +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. 
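+//
+// For example, Int64Slice([]int64{1, 2}) returns a []*int64 whose elements
+// point at the corresponding values.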
+func Int64Slice(vs []int64) []*int64 { + return ptr.Int64Slice(vs) +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + return ptr.Int64Map(vs) +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return ptr.Uint(v) +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + return ptr.UintSlice(vs) +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + return ptr.UintMap(vs) +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return ptr.Uint8(v) +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + return ptr.Uint8Slice(vs) +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + return ptr.Uint8Map(vs) +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return ptr.Uint16(v) +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + return ptr.Uint16Slice(vs) +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + return ptr.Uint16Map(vs) +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return ptr.Uint32(v) +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + return ptr.Uint32Slice(vs) +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + return ptr.Uint32Map(vs) +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return ptr.Uint64(v) +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + return ptr.Uint64Slice(vs) +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + return ptr.Uint64Map(vs) +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return ptr.Float32(v) +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + return ptr.Float32Slice(vs) +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + return ptr.Float32Map(vs) +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return ptr.Float64(v) +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + return ptr.Float64Slice(vs) +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + return ptr.Float64Map(vs) +} + +// Time returns a pointer value for the time.Time value passed in. 
+func Time(v time.Time) *time.Time {
+	return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+	return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+	return ptr.TimeMap(vs)
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+	return ptr.Duration(v)
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+	return ptr.DurationSlice(vs)
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+	return ptr.DurationMap(vs)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
new file mode 100644
index 000000000..26d90719b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
@@ -0,0 +1,310 @@
+package http
+
+import (
+	"crypto/tls"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"net"
+	"net/http"
+	"reflect"
+	"sync"
+	"time"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+	// Default connection pool options
+	DefaultHTTPTransportMaxIdleConns        = 100
+	DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+	// Default connection timeouts
+	DefaultHTTPTransportIdleConnTimeout       = 90 * time.Second
+	DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+	DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+	// Default to TLS 1.2 for all HTTPS requests.
+	DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+	DefaultDialConnectTimeout   = 30 * time.Second
+	DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides an HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+	transport *http.Transport
+	dialer    *net.Dialer
+
+	initOnce sync.Once
+
+	clientTimeout time.Duration
+	client        *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+	return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke an HTTP request,
+// and receive the response. Uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed, the HTTP response received
+// will be returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+	b.initOnce.Do(b.build)
+
+	return b.client.Do(req)
+}
+
+// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
+func (b *BuildableClient) Freeze() aws.HTTPClient {
+	cpy := b.clone()
+	cpy.build()
+	return cpy.client
+}
+
+func (b *BuildableClient) build() {
+	b.client = wrapWithLimitedRedirect(&http.Client{
+		Timeout:   b.clientTimeout,
+		Transport: b.GetTransport(),
+	})
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+	cpy := NewBuildableClient()
+	cpy.transport = b.GetTransport()
+	cpy.dialer = b.GetDialer()
+	cpy.clientTimeout = b.clientTimeout
+
+	return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If something other than an *http.Transport was set as the round tripper,
+// the round tripper will be replaced with a default Transport value before
+// invoking the option functions.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+	cpy := b.clone()
+
+	tr := cpy.GetTransport()
+	for _, opt := range opts {
+		opt(tr)
+	}
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+	cpy := b.clone()
+
+	dialer := cpy.GetDialer()
+	for _, opt := range opts {
+		opt(dialer)
+	}
+	cpy.dialer = dialer
+
+	tr := cpy.GetTransport()
+	tr.DialContext = cpy.dialer.DialContext
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithTimeout sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+	cpy := b.clone()
+	cpy.clientTimeout = timeout
+	return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+	var tr *http.Transport
+	if b.transport != nil {
+		tr = b.transport.Clone()
+	} else {
+		tr = defaultHTTPTransport()
+	}
+
+	return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+	var dialer *net.Dialer
+	if b.dialer != nil {
+		dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+	} else {
+		dialer = defaultDialer()
+	}
+
+	return dialer
+}
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
+func (b *BuildableClient) GetTimeout() time.Duration {
+	return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+	return &net.Dialer{
+		Timeout:   DefaultDialConnectTimeout,
+		KeepAlive: DefaultDialKeepAliveTimeout,
+		DualStack: true,
+	}
+}
+
+func defaultHTTPTransport() *http.Transport {
+	dialer := defaultDialer()
+
+	tr := &http.Transport{
+		Proxy:                 http.ProxyFromEnvironment,
+		DialContext:           dialer.DialContext,
+		TLSHandshakeTimeout:   DefaultHTTPTransportTLSHandleshakeTimeout,
+		MaxIdleConns:          DefaultHTTPTransportMaxIdleConns,
+		MaxIdleConnsPerHost:   DefaultHTTPTransportMaxIdleConnsPerHost,
+		IdleConnTimeout:       DefaultHTTPTransportIdleConnTimeout,
+		ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+		ForceAttemptHTTP2:     true,
+		TLSClientConfig: &tls.Config{
+			MinVersion: DefaultHTTPTransportTLSMinVersion,
+		},
+	}
+
+	return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
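+//
+// For example, copying a *net.Dialer (as GetDialer does above) yields a new
+// *net.Dialer whose exported fields, such as Timeout and KeepAlive, match
+// the source; unexported fields are left at their zero values.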
+func shallowCopyStruct(src interface{}) interface{} {
+	srcVal := reflect.ValueOf(src)
+	srcValType := srcVal.Type()
+
+	var returnAsPtr bool
+	if srcValType.Kind() == reflect.Ptr {
+		srcVal = srcVal.Elem()
+		srcValType = srcValType.Elem()
+		returnAsPtr = true
+	}
+	dstVal := reflect.New(srcValType).Elem()
+
+	for i := 0; i < srcValType.NumField(); i++ {
+		ft := srcValType.Field(i)
+		if len(ft.PkgPath) != 0 {
+			// unexported fields have a PkgPath
+			continue
+		}
+
+		dstVal.Field(i).Set(srcVal.Field(i))
+	}
+
+	if returnAsPtr {
+		dstVal = dstVal.Addr()
+	}
+
+	return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined, a new SDK default
+// http.Transport configuration will be used.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+	tr := c.Transport
+	if tr == nil {
+		tr = defaultHTTPTransport()
+	}
+
+	cc := *c
+	cc.CheckRedirect = limitedRedirect
+	cc.Transport = suppressBadHTTPRedirectTransport{
+		tr: tr,
+	}
+
+	return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected-to location, whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+	// Request.Response, in CheckRedirect is the response that is triggering
+	// the redirect.
+	resp := r.Response
+	if r.URL.String() == badHTTPRedirectLocation {
+		resp.Header.Del(badHTTPRedirectLocation)
+		return http.ErrUseLastResponse
+	}
+
+	switch resp.StatusCode {
+	case 307, 308:
+		// Only allow 307 and 308 redirects as they preserve the method.
+		return nil
+	}
+
+	return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent the HTTP
+// client from receiving 301 and 302 HTTP response redirects that are missing
+// the required Location header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses having a URL of badHTTPRedirectLocation, and
+// suppresses the redirect.
+type suppressBadHTTPRedirectTransport struct {
+	tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+	resp, err := t.tr.RoundTrip(r)
+	if err != nil {
+		return resp, err
+	}
+
+	// S3 is the only known service to return 301 without location header.
+	// The Go standard library HTTP client will return an opaque error if it
+	// tries to follow a 301/302 response missing the location header.
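+	// The stub Location set below is recognized by limitedRedirect, which
+	// suppresses the redirect by returning http.ErrUseLastResponse, so the
+	// caller sees the original 301/302 response instead of an opaque error.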
+	switch resp.StatusCode {
+	case 301, 302:
+		if v := resp.Header.Get("Location"); len(v) == 0 {
+			resp.Header.Set("Location", badHTTPRedirectLocation)
+		}
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
new file mode 100644
index 000000000..556f54a7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
@@ -0,0 +1,42 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes the Content-Type
+// header if the Content-Length header is unset or set to zero.
+type removeContentTypeHeader struct {
+}
+
+// ID returns the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+	return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the Content-Type header from the request when the
+// request's content length is zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in)
+	}
+
+	// remove contentTypeHeader when content-length is zero
+	if req.ContentLength == 0 {
+		req.Header.Del("content-type")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes the Content-Type header if the content
+// length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
new file mode 100644
index 000000000..44651c990
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
@@ -0,0 +1,33 @@
+package http
+
+import (
+	"errors"
+	"fmt"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
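+//
+// A minimal sketch of unwrapping this type with errors.As (the variable
+// names here are illustrative assumptions):
+//
+//	var respErr *ResponseError
+//	if errors.As(err, &respErr) {
+//		fmt.Println(respErr.ServiceRequestID(), respErr.Response.StatusCode)
+//	}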
+type ResponseError struct {
+	*smithyhttp.ResponseError
+
+	// RequestID associated with response error
+	RequestID string
+}
+
+// ServiceRequestID returns the request id associated with Response Error
+func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"https response error StatusCode: %d, RequestID: %s, %v",
+		e.Response.StatusCode, e.RequestID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type
+// that the ResponseError embeds (e.g. the AWS HTTP ResponseError).
+func (e *ResponseError) As(target interface{}) bool {
+	return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
new file mode 100644
index 000000000..8fd14cecd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
@@ -0,0 +1,54 @@
+package http
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+type responseErrorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *responseErrorWrapper) ID() string {
+	return "ResponseErrorWrapper"
+}
+
+func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err == nil {
+		// Nothing to do when there is no error.
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// look for request id in metadata
+	reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+
+	// Wrap the returned smithy error with the request id retrieved from the metadata
+	err = &ResponseError{
+		ResponseError: &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      err,
+		},
+		RequestID: reqID,
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
new file mode 100644
index 000000000..993929bd9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
@@ -0,0 +1,104 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type readResult struct {
+	n   int
+	err error
+}
+
+// ResponseTimeoutError is an error when the reads from the response are
+// delayed longer than the timeout the read was configured for.
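+//
+// A minimal sketch of detecting this condition from caller code via the
+// conventional Timeout interface (an assumed usage pattern, not part of
+// this file):
+//
+//	var terr interface{ Timeout() bool }
+//	if errors.As(err, &terr) && terr.Timeout() {
+//		// the read timed out and may be retried
+//	}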
+type ResponseTimeoutError struct {
+	TimeoutDur time.Duration
+}
+
+// Timeout reports that the error was caused by a timeout, and can be
+// retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+	return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// It returns a ResponseTimeoutError if a timeout occurs.
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whichever completes
+// first will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+	return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+	duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+	return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	response.Body = &timeoutReadCloser{
+		reader:   response.Body,
+		duration: m.duration,
+	}
+	out.RawResponse = response
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 000000000..cc3ae8114
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+	switch t {
+	case UnknownTernary:
+		return "unknown"
+	case FalseTernary:
+		return "false"
+	case TrueTernary:
+		return "true"
+	default:
+		return fmt.Sprintf("unknown value, %d", int(t))
+	}
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+	return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+	UnknownTernary Ternary = iota
+	FalseTernary
+	TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
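+//
+// For example, BoolTernary(true) returns TrueTernary and BoolTernary(false)
+// returns FalseTernary; the zero value of Ternary remains UnknownTernary.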
+func BoolTernary(v bool) Ternary { + if v { + return TrueTernary + } + return FalseTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go new file mode 100644 index 000000000..5f729d45e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go-v2" + +// SDKVersion is the version of this SDK +const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md new file mode 100644 index 000000000..1ac06cf74 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -0,0 +1,62 @@ +# v1.1.6 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
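The config.go hunk added below repeats one provider-scanning pattern three times: walk the config slice, type-assert each entry against a small provider interface, and stop at the first source that reports the value as found or returns an error. A minimal sketch of a custom source satisfying one of these interfaces (the staticFIPS type is an illustrative assumption, not part of the SDK):

	// staticFIPS unconditionally enables FIPS endpoints.
	type staticFIPS struct{}

	func (staticFIPS) GetUseFIPSEndpoint(context.Context) (aws.FIPSEndpointState, bool, error) {
		return aws.FIPSEndpointStateEnabled, true, nil
	}

Passing []interface{}{staticFIPS{}} to ResolveUseFIPSEndpoint would then yield aws.FIPSEndpointStateEnabled with found set to true.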
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
new file mode 100644
index 000000000..cd4d19b89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
@@ -0,0 +1,65 @@
+package configsources
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value
+// for Enable Endpoint Discovery
+type EnableEndpointDiscoveryProvider interface {
+	GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error)
+}
+
+// ResolveEnableEndpointDiscovery extracts the first instance of an EnableEndpointDiscoveryProvider from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs,
+// and an error if one is encountered.
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
+			value, found, err = p.GetEnableEndpointDiscovery(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint
+type UseDualStackEndpointProvider interface {
+	GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
+}
+
+// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpointProvider from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and an error if one is encountered.
+func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseDualStackEndpointProvider); ok {
+			value, found, err = p.GetUseDualStackEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint
+type UseFIPSEndpointProvider interface {
+	GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
+}
+
+// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in provided configs, and an error if one is encountered.
+func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseFIPSEndpointProvider); ok {
+			value, found, err = p.GetUseFIPSEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
new file mode 100644
index 000000000..04fe4f6b5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+ +package configsources + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.1.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md new file mode 100644 index 000000000..790b2d203 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -0,0 +1,35 @@ +# v2.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.0 (2021-11-06) + +* **Release**: Endpoint Variant Model Support +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go new file mode 100644 index 000000000..32251a7e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -0,0 +1,302 @@ +package endpoints + +import ( + "fmt" + "github.com/aws/smithy-go/logging" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// DefaultKey is a compound map key of a variant and other values. +type DefaultKey struct { + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointKey is a compound map key of a region and associated variant value. +type EndpointKey struct { + Region string + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointVariant is a bit field to describe the endpoints attributes. +type EndpointVariant uint64 + +const ( + // FIPSVariant indicates that the endpoint is FIPS capable. 
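+	//
+	// The variants are allocated from the high bits of the value so they can
+	// be combined with bitwise OR, as GetEndpointVariant below does when both
+	// FIPS and dual-stack are enabled.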
+ FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
+
+ // DualStackVariant indicates that the endpoint is DualStack capable.
+ DualStackVariant
+)
+
+// ServiceVariant is a bit field to describe the service endpoint attributes.
+type ServiceVariant uint64
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "s3v4"}
+)
+
+// Options provides configuration needed to direct how endpoints are resolved.
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger.
+ LogDeprecated bool
+
+ // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+ // over the region name passed to the ResolveEndpoint call.
+ ResolvedRegion string
+
+ // Disable usage of HTTPS (TLS / SSL)
+ DisableHTTPS bool
+
+ // Instruct the resolver to use a service endpoint that supports dual-stack.
+ // If a service does not have a dual-stack endpoint an error will be returned by the resolver.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Instruct the resolver to use a service endpoint that supports FIPS.
+ // If a service does not have a FIPS endpoint an error will be returned by the resolver.
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // ServiceVariant is a bitfield of service specified endpoint variant data.
+ ServiceVariant ServiceVariant
+}
+
+// GetEndpointVariant returns the EndpointVariant for the variants associated
+// with the options.
+func (o Options) GetEndpointVariant() (v EndpointVariant) {
+ if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ v |= DualStackVariant
+ }
+ if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled {
+ v |= FIPSVariant
+ }
+ return v
+}
+
+// Partitions is a slice of Partition.
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+ if len(ps) == 0 {
+ return aws.Endpoint{}, fmt.Errorf("no partitions found")
+ }
+
+ if opts.Logger == nil {
+ opts.Logger = logging.Nop{}
+ }
+
+ if len(opts.ResolvedRegion) > 0 {
+ region = opts.ResolvedRegion
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(region, opts) {
+ continue
+ }
+
+ return ps[i].ResolveEndpoint(region, opts)
+ }
+
+ // Fall back to the first partition's format when resolving the endpoint.
+ return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+ ID string
+ RegionRegex *regexp.Regexp
+ PartitionEndpoint string
+ IsRegionalized bool
+ Defaults map[DefaultKey]Endpoint
+ Endpoints Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string, opts Options) bool {
+ _, ok := p.Endpoints[EndpointKey{
+ Region: region,
+ Variant: opts.GetEndpointVariant(),
+ }]
+ return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
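+//
+// A minimal usage sketch (the region and option values here are illustrative,
+// and "partition" stands for an assumed Partition value):
+//
+//	ep, err := partition.ResolveEndpoint("us-west-2", Options{
+//		UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
+//	})
+//
+// On success, ep.URL holds the FIPS-variant endpoint URL for the region, if
+// one is defined for the partition.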
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + endpoints := p.Endpoints + + variant := options.GetEndpointVariant() + serviceVariant := options.ServiceVariant + + defaults := p.Defaults[DefaultKey{ + Variant: variant, + ServiceVariant: serviceVariant, + }] + + return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) +} + +func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { + key := EndpointKey{ + Region: region, + Variant: variant, + } + + if e, ok := endpoints[key]; ok { + return e + } + + if !p.IsRegionalized { + return endpoints[EndpointKey{ + Region: p.PartitionEndpoint, + Variant: variant, + ServiceVariant: serviceVariant, + }] + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return Endpoint{} +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[EndpointKey]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string + + // Indicates that this endpoint is deprecated. + Deprecated aws.Ternary +} + +// IsZero returns whether the endpoint structure is an empty (zero) value. +func (e Endpoint) IsZero() bool { + switch { + case e.Unresolveable != aws.UnknownTernary: + return false + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (CredentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + } + return true +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + if e.IsZero() { + return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) + } + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. 
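+ // For example (illustrative values), a Hostname template of
+ // "sts.{region}.amazonaws.com" becomes "sts.us-west-2.amazonaws.com"
+ // when region is "us-west-2".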
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + if e.Deprecated == aws.TrueTernary && options.LogDeprecated { + options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) + } + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if other.Deprecated != aws.UnknownTernary { + e.Deprecated = other.Deprecated + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go new file mode 100644 index 000000000..71b4d202a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package endpoints + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "2.4.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go new file mode 100644 index 000000000..9791ea590 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go @@ -0,0 +1,33 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +var floatMaxBigInt = big.NewInt(1 << 53) + +// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). +func Float64(reader io.Reader) (float64, error) { + bi, err := rand.Int(reader, floatMaxBigInt) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %v", err) + } + + return float64(bi.Int64()) / (1 << 53), nil +} + +// CryptoRandFloat64 returns a random float64 obtained from the crypto rand +// source. 
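+//
+// The result is uniformly distributed over [0.0, 1.0) with 53 bits of
+// precision, matching the Float64 helper above.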
+func CryptoRandFloat64() (float64, error) {
+ return Float64(rand.Reader)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
new file mode 100644
index 000000000..2b42cbe64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
@@ -0,0 +1,9 @@
+package sdk
+
+// Invalidator provides access to a type's invalidate method to make it
+// invalidate its cache.
+//
+// e.g. aws.SafeCredentialsProvider's Invalidate method.
+type Invalidator interface {
+ Invalidate()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
new file mode 100644
index 000000000..8e8dabad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
@@ -0,0 +1,74 @@
+package sdk
+
+import (
+ "context"
+ "time"
+)
+
+func init() {
+ NowTime = time.Now
+ Sleep = time.Sleep
+ SleepWithContext = sleepWithContext
+}
+
+// NowTime is a value for getting the current time. This value can be overridden
+// for testing, mocking out the current time.
+var NowTime func() time.Time
+
+// Sleep is a value for sleeping for a duration. This value can be overridden
+// for testing and mocking out sleep duration.
+var Sleep func(time.Duration)
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// This value can be overridden for testing and mocking out sleep duration.
+var SleepWithContext func(context.Context, time.Duration) error
+
+// sleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+func sleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
+
+// noOpSleepWithContext does nothing, returns immediately.
+func noOpSleepWithContext(context.Context, time.Duration) error {
+ return nil
+}
+
+func noOpSleep(time.Duration) {}
+
+// TestingUseNopSleep is a utility for disabling sleep across the SDK for
+// testing.
+func TestingUseNopSleep() func() {
+ SleepWithContext = noOpSleepWithContext
+ Sleep = noOpSleep
+
+ return func() {
+ SleepWithContext = sleepWithContext
+ Sleep = time.Sleep
+ }
+}
+
+// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time
+// for testing purposes.
+func TestingUseReferenceTime(referenceTime time.Time) func() {
+ NowTime = func() time.Time {
+ return referenceTime
+ }
+ return func() {
+ NowTime = time.Now
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
new file mode 100644
index 000000000..d008ae27c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+ "strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
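+//
+// For example, HasPrefixFold("Authorization", "auth") reports true, while the
+// case-sensitive strings.HasPrefix("Authorization", "auth") reports false.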
+func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. 
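+//
+// An illustrative DoChan caller ("g", "fetch", and "use" are assumed names,
+// not part of this package):
+//
+//	res := <-g.DoChan("key", fetch)
+//	if res.Err == nil {
+//		use(res.Val, res.Shared)
+//	}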
+type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go new file mode 100644 index 000000000..5d69db5f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go @@ -0,0 +1,13 @@ +package timeconv + +import "time" + +// FloatSecondsDur converts a fractional seconds to duration. +func FloatSecondsDur(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +// DurSecondsFloat converts a duration into fractional seconds. 
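+//
+// For example, DurSecondsFloat(1500*time.Millisecond) returns 1.5, and
+// FloatSecondsDur(1.5) returns a duration of 1500 milliseconds, so the two
+// functions are inverses of each other.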
+func DurSecondsFloat(d time.Duration) float64 { + return float64(d) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md new file mode 100644 index 000000000..fe3f378d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -0,0 +1,79 @@ +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go new file mode 100644 index 000000000..cc919701a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -0,0 +1,48 @@ +package presignedurl + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +// WithIsPresigning adds the isPresigning sentinel value to a context to signal +// that the middleware stack is using the presign flow. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func WithIsPresigning(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, isPresigningKey{}, true) +} + +// GetIsPresigning returns if the context contains the isPresigning sentinel +// value for presigning flows. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetIsPresigning(ctx context.Context) bool { + v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) + return v +} + +type isPresigningKey struct{} + +// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// will update the stack's context to be flagged as being invoked for the +// purpose of presigning. +func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) +} + +type asIsPresigningMiddleware struct{} + +func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } + +func (asIsPresigningMiddleware) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + ctx = WithIsPresigning(ctx) + return next.HandleInitialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go new file mode 100644 index 000000000..1b85375cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go @@ -0,0 +1,3 @@ +// Package presignedurl provides the customizations for API clients to fill in +// presigned URLs into input parameters. +package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go new file mode 100644 index 000000000..1734a92c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.9.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 000000000..1e2f5c812
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+ "context"
+ "fmt"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters into a
+// presigned URL.
+type URLPresigner interface {
+ // PresignURL presigns a URL.
+ PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides a collection of accessors for retrieving and
+// setting the values needed for presigned URL generation.
+type ParameterAccessor struct {
+ // GetPresignedURL accessor points to a function that retrieves a presigned url if present
+ GetPresignedURL func(interface{}) (string, bool, error)
+
+ // GetSourceRegion accessor points to a function that retrieves the source region for a presigned url
+ GetSourceRegion func(interface{}) (string, bool, error)
+
+ // CopyInput accessor points to a function that takes in an input, and returns a copy.
+ CopyInput func(interface{}) (interface{}, error)
+
+ // SetDestinationRegion accessor points to a function that sets the destination region on an api input struct
+ SetDestinationRegion func(interface{}, string) error
+
+ // SetPresignedURL accessor points to a function that sets the presigned url on an api input struct
+ SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+ // Accessor holds the parameter accessors used by this middleware
+ Accessor ParameterAccessor
+
+ // Presigner is the URLPresigner used by the middleware
+ Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Initialize.Remove((*presign)(nil).ID())
+ return err
+}
+
+type presign struct {
+ options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+ ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ // If PresignedURL is already set, ignore the middleware.
+ if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if ok {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ // If the source region is not set, ignore the middleware.
+ srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if !ok || len(srcRegion) == 0 {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ // Create a copy of the original input so the destination region value can
+ // be added. This ensures that value does not leak into the original
+ // request parameters.
+ paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Destination region is the API client's configured region.
+ dstRegion := awsmiddleware.GetRegion(ctx)
+ if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Update the original input with the presigned URL value.
+ if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ return next.HandleInitialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
new file mode 100644
index 000000000..5f7a8af89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -0,0 +1,100 @@
+# v1.16.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated service client model to latest release.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-12-21)
+
+* **Feature**: Updated to latest service endpoints
+
+# v1.11.1 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.10.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-12)
+
+* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
+ +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 000000000..4bff1dfe2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,534 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package sts
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ smithy "github.com/aws/smithy-go"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "time"
+)
+
+const ServiceID = "STS"
+const ServiceAPIVersion = "2011-06-15"
+
+// Client provides the API client to make operation calls for AWS Security Token
+// Service.
+type Client struct {
+ options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveDefaultEndpointConfiguration(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on an operation
+ // call to modify this list for per-operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // client's initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ EndpointResolver EndpointResolver
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number of attempts an API client will
+ // make when calling an operation that fails with a retryable error. A value of
+ // 0 is ignored, and will not be used to configure the API client's created
+ // default retryer, or modify a per-operation call's retry max attempts. When
+ // creating new API clients, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil. If
+ // specified in an operation call's functional options with a value that is
+ // different from the constructed client's Options, the Client's Retryer will
+ // be wrapped to use the operation's specific RetryMaxAttempts value.
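+ //
+ // An illustrative per-operation override (the operation and attempt count
+ // are assumptions, not taken from this file):
+ //
+ //	out, err := client.GetCallerIdentity(ctx, nil,
+ //		func(o *Options) { o.RetryMaxAttempts = 5 })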
+ RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
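+//
+// Illustrative sketch only (not part of the generated code): because each
+// operation works on a copy, per-call option functions cannot mutate the
+// client's own Options. The ctx and input values below are placeholders:
+//
+//	out, err := client.AssumeRole(ctx, input, func(o *sts.Options) {
+//		o.RetryMaxAttempts = 2 // affects only this call's copied Options
+//	})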
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 000000000..7d00b6bd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,417 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources that you might not normally have access to. These +// temporary credentials consist of an access key ID, a secret access key, and a +// security token. Typically, you use AssumeRole within your account or for +// cross-account access. For a comparison of AssumeRole with other API operations +// that produce temporary credentials, see Requesting Temporary Security +// Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRole can be used to make API calls to any Amazon Web Services service with +// the following exception: You cannot call the Amazon Web Services STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. 
The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: A role +// trust policy that specifies who can assume the role and a permissions policy +// that specifies what can be done with the role. You specify the trusted principal +// who is allowed to assume the role in the role trust policy. To assume a role +// from a different account, your Amazon Web Services account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when the +// role is created. That trust policy states which accounts are allowed to delegate +// that access to users in the account. A user who wants to access a role in a +// different account must also have permissions that are delegated from the user +// account administrator. The administrator must attach a policy that allows the +// user to call AssumeRole for the ARN of the role in the other account. To allow a +// user to assume a role in the same account, you can do either of the +// following: +// +// * Attach a policy to the user that allows the user to call +// AssumeRole (as long as the role's trust policy trusts the account). +// +// * Add the +// user as a principal directly in the role's trust policy. +// +// You can do either +// because the role’s trust policy acts as an IAM resource-based policy. When a +// resource-based policy grants access to a principal in the same account, no +// additional identity-based policy is required. For more information about trust +// policies and resource-based policies, see IAM Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the +// IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your +// session. These tags are called session tags. For more information about session +// tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. An administrator must grant you the permissions necessary to +// pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include +// multi-factor authentication (MFA) information when you call AssumeRole. 
This is +// useful for cross-account scenarios to ensure that the user that assumes the role +// has been authenticated with an Amazon Web Services MFA device. In that scenario, +// the trust policy of the role being assumed includes a condition that tests for +// MFA authentication. If the caller does not include valid MFA information, the +// request to assume the role is denied. The condition in a trust policy that tests +// for MFA authentication might look like the following example. "Condition": +// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see +// Configuring MFA-Protected API Access +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the +// IAM User Guide guide. To use MFA with AssumeRole, you pass values for the +// SerialNumber and TokenCode parameters. The SerialNumber value identifies the +// user's hardware or virtual MFA device. The TokenCode is the time-based one-time +// password (TOTP) that the MFA device produces. +func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { + if params == nil { + params = &AssumeRoleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleInput struct { + + // The Amazon Resource Name (ARN) of the role to assume. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Use the role session name to + // uniquely identify a session when the same role is assumed by different + // principals or for different reasons. In cross-account scenarios, the role + // session name is visible to, and can be logged by the account that owns the role. + // The role session name is also used in the ARN of the assumed role principal. + // This means that subsequent cross-account API requests that use the temporary + // security credentials will expose the role session name to the external account + // in their CloudTrail logs. The regex used to validate this parameter is a string + // of characters consisting of upper- and lower-case alphanumeric characters with + // no spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + RoleSessionName *string + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for the + // role. The maximum session duration setting can have a value from 1 hour to 12 + // hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web + // Services CLI or Amazon Web Services API role session to a maximum of one hour. + // When you use the AssumeRole API operation to assume a role, you can specify the + // duration of your role session with the DurationSeconds parameter. You can + // specify a parameter value of up to 43200 seconds (12 hours), depending on the + // maximum session duration setting for your role. 
However, if you assume a role + // using role chaining and provide a DurationSeconds parameter value greater than + // one hour, the operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see How to Use an External ID When Granting Access to Your + // Amazon Web Services Resources to a Third Party + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@:/- + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. 
Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user who + // is making the AssumeRole call. Specify this value if the trust policy of the + // role being assumed includes a condition that requires MFA authentication. The + // value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter + // is a string of characters consisting of upper- and lower-case alphanumeric + // characters with no spaces. You can also include underscores or any of the + // following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@-. You cannot use a value that begins with the text aws:. This prefix is + // reserved for Amazon Web Services internal use. + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a key + // name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + // The plaintext session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. You can pass a session tag with the same key as a tag that is + // already attached to the role. When you do, session tags override a role tag with + // the same key. Tag key–value pairs are not case sensitive, but case is preserved. + // This means that you cannot have separate Department and department tag keys. + // Assume that the role has the Department=Marketing tag and you pass the + // department=engineering session tag. Department and department are not saved as + // separate tags, and the session tag passed in the request takes precedence over + // the role tag. Additionally, if you used temporary credentials to perform this + // operation, the new session inherits any transitive session tags from the calling + // session. If you pass a session tag with the same key as an inherited tag, the + // operation fails. To view the inherited tags for a session, see the CloudTrail + // logs. For more information, see Viewing Session Tags in CloudTrail + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. The format for this parameter, as described by its regex pattern, is a + // sequence of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set a + // tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. 
When you set session tags as + // transitive, the session policy and session tags packed binary limit is not + // affected. If you choose not to specify a transitive tag key, then no tags are + // passed from this session to any subsequent sessions. + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole. + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // Metadata pertaining to the operation's result. 
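+	// As an illustrative sketch (not part of the generated source), the AWS
+	// request ID can be read back from this metadata using the aws/middleware
+	// helper, where out is an AssumeRoleOutput:
+	//
+	//	reqID, ok := awsmiddleware.GetRequestIDMetadata(out.ResultMetadata)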
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRole", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 000000000..e12315e4c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,377 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. 
+// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this +// operation consist of an access key ID, a secret access key, and a security +// token. Applications can use these temporary security credentials to sign calls +// to Amazon Web Services services. Session Duration By default, the temporary +// security credentials created by AssumeRoleWithSAML last for one hour. However, +// you can use the optional DurationSeconds parameter to specify the duration of +// your session. Your role session lasts for the duration that you specify, or +// until the time specified in the SAML authentication response's +// SessionNotOnOrAfter value, whichever is shorter. You can provide a +// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour to 12 +// hours. To learn how to view the maximum value for your role, see View the +// Maximum Session Duration Setting for a Role +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM +// User Guide. Role chaining +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. Permissions The temporary security +// credentials created by AssumeRoleWithSAML can be used to make API calls to any +// Amazon Web Services service with the following exception: you cannot call the +// STS GetFederationToken or GetSessionToken API operations. (Optional) You can +// pass inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. 
You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of +// Amazon Web Services security credentials. The identity of the caller is +// validated by using keys in the metadata document that is uploaded for the SAML +// provider entity for your identity provider. Calling AssumeRoleWithSAML can +// result in an entry in your CloudTrail logs. The entry includes the value in the +// NameID element of the SAML assertion. We recommend that you use a NameIDType +// that is not associated with any personally identifiable information (PII). For +// example, you could instead use the persistent identifier +// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can +// configure your IdP to pass attributes into your SAML assertion as session tags. +// Each session tag consists of a key name and an associated value. For more +// information about session tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag +// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For +// these and additional limits, see IAM and STS Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// session policies and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates by +// percentage how close the policies and tags for your request are to the upper +// size limit. You can pass a session tag with the same key as a tag that is +// attached to the role. When you do, session tags override the role's tags with +// the same key. An administrator must grant you the permissions necessary to pass +// session tags. The administrator can also create granular permissions to allow +// you to pass only specific session tags. For more information, see Tutorial: +// Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. SAML Configuration Before your application can call +// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to +// issue the claims required by Amazon Web Services. Additionally, you must use +// Identity and Access Management (IAM) to create a SAML provider entity in your +// Amazon Web Services account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. 
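+// As a usage sketch only (not part of the generated code; the ARNs are
+// placeholders and samlResponse stands for the base64-encoded assertion
+// obtained from your IdP), a call might look like:
+//
+//	out, err := client.AssumeRoleWithSAML(context.TODO(), &sts.AssumeRoleWithSAMLInput{
+//		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
+//		RoleArn:       aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
+//		SAMLAssertion: aws.String(samlResponse),
+//	})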
+// For more information, see the following resources: +// +// * About SAML 2.0-based +// Federation +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. For more + // information, see Configuring a Relying Party and Adding Claims + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. 
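+	//
+	// For illustration only (not part of the generated source), a hypothetical
+	// 12-hour session request would set:
+	//
+	//	input.DurationSeconds = aws.Int32(43200)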
+ DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
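+	//
+	// Illustrative sketch (placeholder ARN; not part of the generated code):
+	//
+	//	input.PolicyArns = []types.PolicyDescriptorType{
+	//		{Arn: aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy")},
+	//	}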
+ PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. + Issuer *string + + // A hash value based on the concatenation of the following: + // + // * The Issuer response + // value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last + // part of the ARN) of the SAML provider in IAM. + // + // The combination of NameQualifier + // and Subject can be used to uniquely identify a federated user. The following + // pseudocode shows how the hash value is calculated: BASE64 ( SHA1 ( + // "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. You can require + // users to set a source identity value when they assume a role. You do this by + // using the sts:SourceIdentity condition key in a role trust policy. That way, + // actions that are taken with the role are associated with that user. After the + // source identity is set, the value cannot be changed. It is present in the + // request for all actions that are taken by the role and persists across chained + // role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithSAML. You do this by adding an attribute to the SAML + // assertion. For more information about using source identity, see Monitor and + // control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent. 
If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If + // the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithSAML", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 000000000..2e8b51c98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,395 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. +// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or +// Amazon Cognito federated identities +// (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). 
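+// As an illustrative sketch (not part of the generated code; the ARN is a
+// placeholder and idToken stands for the token issued by your identity
+// provider), a call might look like:
+//
+//	out, err := client.AssumeRoleWithWebIdentity(context.TODO(), &sts.AssumeRoleWithWebIdentityInput{
+//		RoleArn:          aws.String("arn:aws:iam::123456789012:role/ExampleWebRole"),
+//		RoleSessionName:  aws.String("web-session"),
+//		WebIdentityToken: aws.String(idToken),
+//	})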
+// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide +// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android +// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify a +// user. You can also supply the user with a consistent identity throughout the +// lifetime of an application. To learn more about Amazon Cognito, see Amazon +// Cognito Overview +// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito +// Overview +// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the Amazon Web Services SDK for iOS Developer Guide. Calling +// AssumeRoleWithWebIdentity does not require the use of Amazon Web Services +// security credentials. Therefore, you can distribute an application (for example, +// on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this API +// consist of an access key ID, a secret access key, and a security token. +// Applications can use these temporary security credentials to sign calls to +// Amazon Web Services service API operations. Session Duration By default, the +// temporary security credentials created by AssumeRoleWithWebIdentity last for one +// hour. However, you can use the optional DurationSeconds parameter to specify the +// duration of your session. You can provide a value from 900 seconds (15 minutes) +// up to the maximum session duration setting for the role. This setting can have a +// value from 1 hour to 12 hours. To learn how to view the maximum value for your +// role, see View the Maximum Session Duration Setting for a Role +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM +// User Guide. Permissions The temporary security credentials created by +// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web +// Services service with the following exception: you cannot call the STS +// GetFederationToken or GetSessionToken API operations. 
(Optional) You can pass +// inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass +// attributes into your web identity token as session tags. Each session tag +// consists of a key name and an associated value. For more information about +// session tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag +// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For +// these and additional limits, see IAM and STS Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// session policies and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates by +// percentage how close the policies and tags for your request are to the upper +// size limit. You can pass a session tag with the same key as a tag that is +// attached to the role. When you do, the session tag overrides the role tag with +// the same key. An administrator must grant you the permissions necessary to pass +// session tags. The administrator can also create granular permissions to allow +// you to pass only specific session tags. For more information, see Tutorial: +// Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Identities Before your application can call +// AssumeRoleWithWebIdentity, you must have an identity token from a supported +// identity provider and create a role that the application can assume. The role +// that your application assumes must trust the identity provider that is +// associated with the identity token. In other words, the identity provider must +// be specified in the role's trust policy. 
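+//
+// For illustration, a minimal caller-side sketch (the role ARN and token
+// below are placeholders; client is assumed to come from
+// sts.NewFromConfig(cfg), with helpers from github.com/aws/aws-sdk-go-v2/aws
+// and ctx a context.Context):
+//
+//     out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
+//         RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-role"),
+//         RoleSessionName:  aws.String("example-session"),
+//         WebIdentityToken: aws.String(idToken), // token obtained from the identity provider
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     creds := out.Credentials // temporary AccessKeyId, SecretAccessKey, SessionToken
+//
+// Because the operation is unsigned, no Amazon Web Services credentials need
+// to be configured before making the call.
+//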
Calling AssumeRoleWithWebIdentity can +// result in an entry in your CloudTrail logs. The entry includes the Subject +// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided +// web identity token. We recommend that you avoid using any personally +// identifiable information (PII) in this field. For example, you could instead use +// a GUID or a pairwise identifier, as suggested in the OIDC specification +// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more +// information about how to use web identity federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// +// * Using Web +// Identity Federation API Operations for Mobile Apps +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * +// Web Identity Federation Playground +// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, Facebook, +// or Google, getting temporary security credentials, and then using those +// credentials to make a request to Amazon Web Services. +// +// * Amazon Web Services SDK +// for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and Amazon Web +// Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity +// Federation with Mobile Applications +// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. The regex used to + // validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@- + // + // This member is required. 
+ RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. 
You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole. + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. The size of the security token that STS API + // operations return is not fixed. We strongly recommend that you make no + // assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. 
+ PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect ID + // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens, + // this contains the value of the ProviderId parameter that was passed in the + // AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. You can require users to set a source identity value + // when they assume a role. You do this by using the sts:SourceIdentity condition + // key in a role trust policy. That way, actions that are taken with the role are + // associated with that user. After the source identity is set, the value cannot be + // changed. It is present in the request for all actions that are taken by the role + // and persists across chained role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with User + // Pools + // (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithWebIdentity", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 000000000..b7a637d42 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. For +// example, if a user is not authorized to perform an operation that he or she has +// requested, the request returns a Client.UnauthorizedOperation response (an HTTP +// 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. Only +// certain Amazon Web Services operations return an encoded authorization message. +// The documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. 
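+//
+// For illustration, a minimal caller-side sketch (encodedMsg stands for the
+// opaque string returned alongside the original error; client is assumed to
+// come from sts.NewFromConfig(cfg)):
+//
+//     out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
+//         EncodedMessage: aws.String(encodedMsg),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     fmt.Println(aws.ToString(out.DecodedMessage)) // JSON document describing the denial
+//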
The message is +// encoded because the details of the authorization status can contain privileged +// information that the user who requested the operation should not see. To decode +// an authorization status message, a user must be granted permissions through an +// IAM policy +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to +// request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. +// The decoded message includes the following type of information: +// +// * Whether the +// request was denied due to an explicit deny or due to the absence of an explicit +// allow. For more information, see Determining Whether a Request is Allowed or +// Denied +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested +// action. +// +// * The requested resource. +// +// * The values of condition keys in the +// context of the user's request. +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "DecodeAuthorizationMessage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 000000000..b86a425d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. Access keys +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a +// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For +// more information about access keys, see Managing Access Keys for IAM Users +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. When you pass an access key ID to this operation, it +// returns the ID of the Amazon Web Services account to which the keys belong. 
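+//
+// For illustration, a minimal caller-side sketch (the key ID shown is the
+// documentation example value; client is assumed to come from
+// sts.NewFromConfig(cfg)):
+//
+//     out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
+//         AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     account := aws.ToString(out.Account) // ID of the account that owns the key
+//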
+// Access key IDs beginning with AKIA are long-term credentials for an IAM user or +// the Amazon Web Services account root user. Access key IDs beginning with ASIA +// are temporary credentials that are created using STS operations. If the account +// in the response belongs to you, you can sign in as the root user and review your +// root user access keys. Then, you can pull a credentials report +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail logs +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. This operation does not indicate the state of the access +// key. The key might be active, inactive, or deleted. Active keys might not have +// permissions to perform an operation. Providing a deleted access key might return +// an error that the key doesn't exist. +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. This parameter allows (through its regex + // pattern) a string of characters that can consist of any upper- or lowercase + // letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetAccessKeyInfo", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 000000000..a7f96c220 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. No permissions are required to perform this operation. If an +// administrator adds a policy to your IAM user or role that explicitly denies +// access to the sts:GetCallerIdentity action, you can still perform this +// operation. Permissions are not required because the same information is returned +// when an IAM user or role is denied access. 
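+//
+// For illustration, a minimal caller-side sketch (client is assumed to come
+// from sts.NewFromConfig(cfg)); the input struct has no fields:
+//
+//     out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
+//     if err != nil {
+//         return err
+//     }
+//     fmt.Printf("account=%s arn=%s userid=%s\n",
+//         aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
+//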
To view an example response, see I Am +// Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. +func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the type + // of entity that is making the call. The values returned are those listed in the + // aws:userid column in the Principal table + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetCallerIdentity", + } +} + +// PresignGetCallerIdentity is used to generate a presigned HTTP Request which +// contains presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, + c.client.addOperationGetCallerIdentityMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 000000000..01a3d411b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,324 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key ID, +// a secret access key, and a security token) for a federated user. A typical use +// is in a proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. You must call the +// GetFederationToken operation using the long-term security credentials of an IAM +// user. As a result, this call is appropriate in contexts where those credentials +// can be safely stored, usually in a server-based application. For a comparison of +// GetFederationToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity. For more information, see Federation Through a +// Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. You can also call GetFederationToken using the security +// credentials of an Amazon Web Services account root user, but we do not recommend +// it. 
Instead, we recommend that you create an IAM user for the purpose of the +// proxy application. Then attach a policy to the IAM user that limits federated +// users to only the actions and resources that they need to access. For more +// information, see IAM Best Practices +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the +// IAM User Guide. Session duration The temporary credentials are valid for the +// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). +// Temporary credentials obtained by using the Amazon Web Services account root +// user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service except the following: +// +// * You cannot call any IAM +// operations using the CLI or the Amazon Web Services API. +// +// * You cannot call any +// STS operations except GetCallerIdentity. +// +// You must pass an inline or managed +// session policy +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Though the session policy +// parameters are optional, if you do not pass a policy, then the resulting +// federated user session has no permissions. When you pass session policies, the +// session permissions are the intersection of the IAM user policies and the +// session policies that you pass. This gives you a way to further restrict the +// permissions for a federated user. You cannot use session policies to grant more +// permissions than those that are defined in the permissions policy of the IAM +// user. For more information, see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to create +// temporary security credentials, see GetFederationToken—Federation Through a +// Custom Identity Broker +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session in the +// Principal element of the policy, the session has the permissions allowed by the +// policy. These permissions are granted in addition to the permissions granted by +// the session policies. Tags (Optional) You can pass tag key-value pairs to your +// session. These are called session tags. For more information about session tags, +// see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can create a mobile-based or browser-based app that can +// authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity. 
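+//
+// Where a custom identity broker is used instead, a sketch of passing a
+// session tag on the call might look like the following (the name, policy
+// document, and tag values are placeholders; client is assumed to come from
+// sts.NewFromConfig(cfg), called with long-term IAM user credentials, with
+// types from github.com/aws/aws-sdk-go-v2/service/sts/types):
+//
+//     out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
+//         Name:   aws.String("Bob"),
+//         Policy: aws.String(sessionPolicyJSON), // inline session policy
+//         Tags: []types.Tag{
+//             {Key: aws.String("Department"), Value: aws.String("Marketing")},
+//         },
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     creds := out.Credentials // temporary credentials for the federated user
+//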
For more information, see Federation Through a +// Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is +// preserved. This means that you cannot have separate Department and department +// tag keys. Assume that the user that you are federating has the +// Department=Marketing tag and you pass the department=engineering session tag. +// Department and department are not saved as separate tags, and the session tag +// passed in the request takes precedence over the user tag. +func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { + if params == nil { + params = &GetFederationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetFederationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetFederationTokenInput struct { + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference the + // federated user name in a resource-based policy, such as in an Amazon S3 bucket + // policy. The regex used to validate this parameter is a string of characters + // consisting of upper- and lower-case alphanumeric characters with no spaces. You + // can also include underscores or any of the following characters: =,.@- + // + // This member is required. + Name *string + + // The duration, in seconds, that the session should last. Acceptable durations for + // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36 + // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using + // Amazon Web Services account root user credentials are restricted to a maximum of + // 3,600 seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // You must pass an inline or managed session policy + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to use as + // managed session policies. This parameter is optional. However, if you do not + // pass any session policies, then the resulting federated user session has no + // permissions. When you pass session policies, the session permissions are the + // intersection of the IAM user policies and the session policies that you pass. + // This gives you a way to further restrict the permissions for a federated user. 
+ // You cannot use session policies to grant more permissions than those that are + // defined in the permissions policy of the IAM user. For more information, see + // Session Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. An Amazon Web Services conversion compresses the + // passed session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. You must pass an inline or + // managed session policy + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to use as + // managed session policies. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. You can provide up to 10 managed + // policy ARNs. For more information about ARNs, see Amazon Resource Names (ARNs) + // and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. This parameter is optional. However, + // if you do not pass any session policies, then the resulting federated user + // session has no permissions. When you pass session policies, the session + // permissions are the intersection of the IAM user policies and the session + // policies that you pass. This gives you a way to further restrict the permissions + // for a federated user. You cannot use session policies to grant more permissions + // than those that are defined in the permissions policy of the IAM user. For more + // information, see Session Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. 
+ // An Amazon Web Services conversion compresses the passed session policies and + // session tags into a packed binary format that has a separate limit. Your request + // can fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see Passing Session + // Tags in STS + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + // The plaintext session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. You can pass a session tag with the same key as a tag that is + // already attached to the user you are federating. When you do, session tags + // override a user tag with the same key. Tag key–value pairs are not case + // sensitive, but case is preserved. This means that you cannot have separate + // Department and department tag keys. Assume that the role has the + // Department=Marketing tag and you pass the department=engineering session tag. + // Department and department are not saved as separate tags, and the session tag + // passed in the request takes precedence over the role tag. + Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetFederationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 000000000..b292f208a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or IAM +// user. The credentials consist of an access key ID, a secret access key, and a +// security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances. MFA-enabled IAM users would need to call +// GetSessionToken and submit an MFA code that is associated with their MFA device. 
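+//
+// For illustration, a minimal caller-side sketch of such an MFA-protected
+// request (the serial number and token code are placeholders; client is
+// assumed to come from sts.NewFromConfig(cfg)):
+//
+//     out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
+//         DurationSeconds: aws.Int32(3600), // one hour
+//         SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//         TokenCode:       aws.String("123456"),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     creds := out.Credentials // temporary, MFA-authenticated credentials
+//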
+// Using the temporary security credentials that are returned from the call, IAM +// users can then make programmatic calls to API operations that require MFA +// authentication. If you do not supply a correct MFA code, then the API returns an +// access denied error. For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Session Duration The GetSessionToken operation must be +// called by using the long-term Amazon Web Services security credentials of the +// Amazon Web Services account root user or an IAM user. Credentials that are +// created by IAM users are valid for the duration that you specify. This duration +// can range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. Permissions The temporary security +// credentials created by GetSessionToken can be used to make API calls to any +// Amazon Web Services service with the following exceptions: +// +// * You cannot call +// any IAM API operations unless MFA authentication information is included in the +// request. +// +// * You cannot call any STS API except AssumeRole or +// GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with +// Amazon Web Services account root user credentials. Instead, follow our best +// practices +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, and +// using IAM users for everyday interaction with Amazon Web Services. The +// credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. If +// GetSessionToken is called using Amazon Web Services account root user +// credentials, the temporary credentials have root user permissions. Similarly, if +// GetSessionToken is called using the credentials of an IAM user, the temporary +// credentials have the same permissions as the IAM user. For more information +// about using GetSessionToken to create temporary credentials, go to Temporary +// Credentials for Users in Untrusted Environments +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { + if params == nil { + params = &GetSessionTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionTokenInput struct { + + // The duration, in seconds, that the credentials should remain valid. 
Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for + // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds + // (one hour). If the duration is longer than one hour, the session for Amazon Web + // Services account owners defaults to one hour. + DurationSeconds *int32 + + // The identification number of the MFA device that is associated with the IAM user + // who is making the GetSessionToken call. Specify this value if the IAM user has a + // policy that requires MFA authentication. The value is either the serial number + // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) + // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find + // the device for an IAM user by going to the Amazon Web Services Management + // Console and viewing the user's security credentials. The regex used to validate + // this parameter is a string of characters consisting of upper- and lower-case + // alphanumeric characters with no spaces. You can also include underscores or any + // of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication is + // required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. The + // format for this parameter, as described by its regex pattern, is a sequence of + // six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. 
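+
+	// Editor's note (not part of the generated SDK source): a minimal sketch of
+	// an MFA-protected GetSessionToken call using the DurationSeconds,
+	// SerialNumber and TokenCode inputs documented above. The serial number and
+	// token code are illustrative assumptions, and a client constructed with
+	// sts.NewFromConfig is assumed.
+	//
+	//	out, err := client.GetSessionToken(context.TODO(), &sts.GetSessionTokenInput{
+	//		DurationSeconds: aws.Int32(3600),                                  // 1 hour
+	//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical MFA device
+	//		TokenCode:       aws.String("123456"),                             // code from the MFA device
+	//	})
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	creds := out.Credentials // temporary access key ID, secret key and session token
+	//	_ = creds
+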
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetSessionToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 000000000..5d634ce35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2507 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
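+//
+// Editor's note (not part of the generated source): the deserializers below
+// map awsquery XML error responses onto the typed errors in the types
+// package, so callers can branch on a specific failure with errors.As. A
+// hedged sketch, assuming a previously constructed *sts.Client:
+//
+//	_, err := client.GetSessionToken(context.TODO(), &sts.GetSessionTokenInput{})
+//	var rde *types.RegionDisabledException
+//	if errors.As(err, &rde) {
+//		log.Printf("STS is not activated in this region: %s", rde.ErrorMessage())
+//	}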
+ +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err 
!= nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := &AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + 
t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + 
return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and 
ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + 
originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + 
{ + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + 
for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 000000000..7cabbb97e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,12 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. + +// Security Token Service (STS) enables you to request +// temporary, limited-privilege credentials for Identity and Access Management +// (IAM) users or for users that you authenticate (federated users). This guide +// provides descriptions of the STS API. For more information about using this +// service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 000000000..cababea22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. 
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + 
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json new file mode 100644 index 000000000..86341bb7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -0,0 +1,35 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AssumeRole.go", + "api_op_AssumeRoleWithSAML.go", + "api_op_AssumeRoleWithWebIdentity.go", + "api_op_DecodeAuthorizationMessage.go", + "api_op_GetAccessKeyInfo.go", + "api_op_GetCallerIdentity.go", + "api_op_GetFederationToken.go", + "api_op_GetSessionToken.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sts", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go new file mode 100644 index 000000000..9af7dc268 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sts + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go new file mode 100644 index 000000000..28ed441bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -0,0 +1,445 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, 
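+ // Editor's note (illustrative, not part of the generated data): "aws-global" + // is a pseudo-region. A client built with config.WithRegion("aws-global") + // resolves to the global https://sts.amazonaws.com endpoint, and requests are + // signed for us-east-1 as declared by the CredentialScope above. 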
+ endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, 
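+ // Editor's note (illustrative, not part of the generated data): a minimal + // sketch of how these partition tables are consumed, using only the exported + // API of this file: + // + // r := New() // *Resolver over defaultPartitions + // ep, err := r.ResolveEndpoint("us-east-1", Options{ + // UseFIPSEndpoint: aws.FIPSEndpointStateEnabled, + // }) + // // on success, ep.URL is expected to be + // // https://sts-fips.us-east-1.amazonaws.com + // + // Deprecated pseudo-regions such as "us-east-1-fips" are normally rewritten + // beforehand by finalizeClientEndpointResolverOptions (endpoints.go above) into + // ResolvedRegion "us-east-1" with UseFIPSEndpoint enabled. 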
+ { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 000000000..05531d369 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,835 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + 
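+// Editor's note (illustrative, not generated): each HandleSerialize method in +// this file follows the same shape. For this operation, a default client would +// POST to the service root with Content-Type application/x-www-form-urlencoded +// and an awsquery body carrying the Action and Version keys set below plus the +// encoded input members, along the lines of (member order may differ): +// +// Action=GetFederationToken&Version=2011-06-15&Name=example&DurationSeconds=3600 + 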
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} + +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + 
bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := 
awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil 
{ + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 000000000..b109fe5fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" } +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. +type IDPCommunicationErrorException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { return "IDPCommunicationError" } +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. If this error is returned for the +// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired +// or has been explicitly revoked. 
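+//
+// Illustrative detection with errors.As (sketch; err is a placeholder for
+// an error returned by an STS operation):
+//
+//	var rejected *IDPRejectedClaimException
+//	if errors.As(err, &rejected) {
+//		// obtain a fresh claim from the identity provider, then retry
+//	}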
+type IDPRejectedClaimException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { return "IDPRejectedClaim" } +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +type InvalidAuthorizationMessageException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + return "InvalidAuthorizationMessageException" +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { return "InvalidIdentityToken" } +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +type MalformedPolicyDocumentException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { return "MalformedPolicyDocument" } +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You could receive this error even though you meet other defined +// session policy and session tag limits. 
For more information, see IAM and STS +// Entity Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +type PackedPolicyTooLargeException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { return "PackedPolicyTooLarge" } +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being asked +// to generate credentials. The account administrator must use the IAM console to +// activate STS in that region. For more information, see Activating and +// Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +type RegionDisabledException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { return "RegionDisabledException" } +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 000000000..86e509905 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,124 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in + // the IAM User Guide. + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. + // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. 
+ SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in + // the IAM User Guide. + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource Names + // (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. + Arn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags to +// control access to resources. For more information, see Tagging Amazon Web +// Services STS Sessions +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. +type Tag struct { + + // The key for a session tag. You can pass up to 50 session tags. The plain text + // session tag keys can’t exceed 128 characters. For these and additional limits, + // see IAM and STS Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Key *string + + // The value for a session tag. You can pass up to 50 session tags. The plain text + // session tag values can’t exceed 256 characters. For these and additional limits, + // see IAM and STS Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 000000000..3e4bad2a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
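+//
+// The validators below register one Initialize-step middleware per STS
+// operation; required input members are checked before the request is
+// serialized or sent, so a missing parameter fails fast on the client.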
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + 
+type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 79f18fb2f..4818ea427 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -170,6 +170,9 @@ type Config struct { // // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case // Metadata member's map keys. The value of the header in the map is unaffected. + // + // The AWS SDK for Go v2, uses lower case header maps by default. The v1 + // SDK provides this opt-in for this option, for backwards compatibility. 
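+//
+// Illustrative opt-in (sketch; session construction abbreviated):
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		LowerCaseHeaderMaps: aws.Bool(true),
+//	}))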
LowerCaseHeaderMaps *bool // Set this to `true` to disable the EC2Metadata client from overriding the diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 9a8a5fada..f70c832b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -2293,15 +2293,60 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + }, }, }, "appstream2": service{ @@ -2828,6 +2873,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3004,6 +3052,20 @@ var awsPartition = partition{ }, }, }, + "billingconductor": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "billingconductor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "braket": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -3588,6 +3650,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -3597,6 +3662,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11021,6 +11089,18 @@ var awsPartition = partition{ }, "ivs": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13773,6 +13853,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -21939,6 +22022,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + 
}: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24169,6 +24255,14 @@ var awsusgovPartition = partition{ }, }, "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", @@ -24353,20 +24447,40 @@ var awsusgovPartition = partition{ "cloudtrail": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", }, }, }, @@ -25358,20 +25472,40 @@ var awsusgovPartition = partition{ "events": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "events.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "events.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", }, }, }, @@ -26242,20 +26376,40 @@ var awsusgovPartition = partition{ "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "logs.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "logs.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", }, }, }, @@ -27591,25 +27745,55 @@ var awsusgovPartition = partition{ "sns": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "sns.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "sns.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, }, }, }, "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 752ae47f8..3f0001f91 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -15,8 +15,8 @@ import ( // and determine if a request API error should be retried. // // client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to +// determine if the request is retried. type Retryer interface { // RetryRules return the retry delay that should be used by the SDK before // making another request attempt for the failed request. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 9bded1622..79277614e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.43.8" +const SDKVersion = "1.43.22" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 5f13d4acb..63f66af2c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -276,6 +276,25 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) value = base64.StdEncoding.EncodeToString([]byte(value)) } str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) case []byte: str = base64.StdEncoding.EncodeToString(value) case bool: diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 4cea26616..3ab928799 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -43344,6 +43344,12 @@ func (c *EC2) MoveByoipCidrToIpamRequest(input *MoveByoipCidrToIpamInput) (req * // // Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool. // +// If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can +// move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR +// to IPAM. If you are bringing a new IP address to Amazon Web Services for +// the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM +// (/vpc/latest/ipam/tutorials-byoip-ipam.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -43507,7 +43513,7 @@ func (c *EC2) ProvisionIpamPoolCidrRequest(input *ProvisionIpamPoolCidrInput) (r // ProvisionIpamPoolCidr API operation for Amazon Elastic Compute Cloud. // -// Provision a CIDR to an IPAM pool. You can use thsi action to provision new +// Provision a CIDR to an IPAM pool. You can use this action to provision new // CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to // a pool within it. // @@ -61546,7 +61552,7 @@ type CreateFleetInput struct { // desired capacity, and returns errors for any instances that could not // be launched. // - // For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/ec2-fleet-request-type.html) + // For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-request-type.html) // in the Amazon EC2 User Guide. Type *string `type:"string" enum:"FleetType"` @@ -62827,7 +62833,9 @@ type CreateIpamPoolInput struct { // The IP protocol assigned to this IPAM pool. 
You must choose either IPv4 or // IPv6 protocol for a pool. - AddressFamily *string `type:"string" enum:"AddressFamily"` + // + // AddressFamily is a required field + AddressFamily *string `type:"string" required:"true" enum:"AddressFamily"` // The default netmask length for allocations added to this pool. If, for example, // the CIDR assigned to this pool is 10.0.0.0/8 and you enter 16 here, new allocations @@ -62937,6 +62945,9 @@ func (s CreateIpamPoolInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateIpamPoolInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateIpamPoolInput"} + if s.AddressFamily == nil { + invalidParams.Add(request.NewErrParamRequired("AddressFamily")) + } if s.IpamScopeId == nil { invalidParams.Add(request.NewErrParamRequired("IpamScopeId")) } @@ -71710,6 +71721,25 @@ func (s DeleteInternetGatewayOutput) GoString() string { type DeleteIpamInput struct { _ struct{} `type:"structure"` + // Enables you to quickly delete an IPAM, private scopes, pools in private scopes, + // and any allocations in the pools in private scopes. You cannot delete the + // IPAM with this option if there is a pool in your public scope. If you use + // this option, IPAM does the following: + // + // * Deallocates any CIDRs allocated to VPC resources (such as VPCs) in pools + // in private scopes. No VPC resources are deleted as a result of enabling + // this option. The CIDR associated with the resource will no longer be allocated + // from an IPAM pool, but the CIDR itself will remain unchanged. + // + // * Deprovisions all IPv4 CIDRs provisioned to IPAM pools in private scopes. + // + // * Deletes all IPAM pools in private scopes. + // + // * Deletes all non-default private scopes in the IPAM. + // + // * Deletes the default public and private scopes and the IPAM. + Cascade *bool `type:"boolean"` + // A check for whether you have the required permissions for the action without // actually making the request and provides an error response. If you have the // required permissions, the error response is DryRunOperation. Otherwise, it @@ -71753,6 +71783,12 @@ func (s *DeleteIpamInput) Validate() error { return nil } +// SetCascade sets the Cascade field's value. +func (s *DeleteIpamInput) SetCascade(v bool) *DeleteIpamInput { + s.Cascade = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *DeleteIpamInput) SetDryRun(v bool) *DeleteIpamInput { s.DryRun = &v @@ -77092,7 +77128,7 @@ type DescribeAccountAttributesInput struct { _ struct{} `type:"structure"` // The account attribute names. - AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list" enum:"AccountAttributeName"` // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have @@ -83910,7 +83946,7 @@ type DescribeInstanceTypesInput struct { // The instance types. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon EC2 User Guide. - InstanceTypes []*string `locationName:"InstanceType" type:"list"` + InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"` // The maximum number of results to return for the request in a single page. 
// The remaining results can be seen by sending another request with the next @@ -91463,7 +91499,7 @@ type DescribeSpotPriceHistoryInput struct { Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // Filters the results by the specified instance types. - InstanceTypes []*string `locationName:"InstanceType" type:"list"` + InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"` // The maximum number of results to return in a single call. Specify a value // between 1 and 1000. The default value is 1000. To retrieve the remaining @@ -106738,7 +106774,7 @@ type GetInstanceTypesFromInstanceRequirementsInput struct { // The processor architecture type. // // ArchitectureTypes is a required field - ArchitectureTypes []*string `locationName:"ArchitectureType" locationNameList:"item" type:"list" required:"true"` + ArchitectureTypes []*string `locationName:"ArchitectureType" locationNameList:"item" type:"list" required:"true" enum:"ArchitectureType"` // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have @@ -106762,7 +106798,7 @@ type GetInstanceTypesFromInstanceRequirementsInput struct { // The virtualization type. // // VirtualizationTypes is a required field - VirtualizationTypes []*string `locationName:"VirtualizationType" locationNameList:"item" type:"list" required:"true"` + VirtualizationTypes []*string `locationName:"VirtualizationType" locationNameList:"item" type:"list" required:"true" enum:"VirtualizationType"` } // String returns the string representation. @@ -115566,7 +115602,7 @@ type InstanceRequirements struct { // * For instance types with Xilinx devices, specify xilinx. // // Default: Any manufacturer - AcceleratorManufacturers []*string `locationName:"acceleratorManufacturerSet" locationNameList:"item" type:"list"` + AcceleratorManufacturers []*string `locationName:"acceleratorManufacturerSet" locationNameList:"item" type:"list" enum:"AcceleratorManufacturer"` // The accelerators that must be on the instance type. // @@ -115585,7 +115621,7 @@ type InstanceRequirements struct { // * For instance types with Xilinx VU9P FPGAs, specify vu9p. // // Default: Any accelerator - AcceleratorNames []*string `locationName:"acceleratorNameSet" locationNameList:"item" type:"list"` + AcceleratorNames []*string `locationName:"acceleratorNameSet" locationNameList:"item" type:"list" enum:"AcceleratorName"` // The minimum and maximum amount of total accelerator memory, in MiB. // @@ -115601,7 +115637,7 @@ type InstanceRequirements struct { // * For instance types with inference accelerators, specify inference. // // Default: Any accelerator type - AcceleratorTypes []*string `locationName:"acceleratorTypeSet" locationNameList:"item" type:"list"` + AcceleratorTypes []*string `locationName:"acceleratorTypeSet" locationNameList:"item" type:"list" enum:"AcceleratorType"` // Indicates whether bare metal instance types must be included, excluded, or // required. @@ -115647,7 +115683,7 @@ type InstanceRequirements struct { // Image (AMI) that you specify in your launch template. // // Default: Any manufacturer - CpuManufacturers []*string `locationName:"cpuManufacturerSet" locationNameList:"item" type:"list"` + CpuManufacturers []*string `locationName:"cpuManufacturerSet" locationNameList:"item" type:"list" enum:"CpuManufacturer"` // The instance types to exclude. 
You can use strings with one or more wild // cards, represented by an asterisk (*), to exclude an instance type, size, @@ -115673,7 +115709,7 @@ type InstanceRequirements struct { // For previous generation instance types, specify previous. // // Default: Current and previous generation instance types - InstanceGenerations []*string `locationName:"instanceGenerationSet" locationNameList:"item" type:"list"` + InstanceGenerations []*string `locationName:"instanceGenerationSet" locationNameList:"item" type:"list" enum:"InstanceGeneration"` // Indicates whether instance types with instance store volumes are included, // excluded, or required. For more information, Amazon EC2 instance store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) @@ -115696,7 +115732,7 @@ type InstanceRequirements struct { // * For instance types with solid state drive (SDD) storage, specify sdd. // // Default: hdd and sdd - LocalStorageTypes []*string `locationName:"localStorageTypeSet" locationNameList:"item" type:"list"` + LocalStorageTypes []*string `locationName:"localStorageTypeSet" locationNameList:"item" type:"list" enum:"LocalStorageType"` // The minimum and maximum amount of memory per vCPU, in GiB. // @@ -115724,6 +115760,10 @@ type InstanceRequirements struct { // This parameter is not supported for GetSpotPlacementScores (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) // and GetInstanceTypesFromInstanceRequirements (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html). // + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection + // threshold is applied based on the per-vCPU or per-memory price instead of + // the per-instance price. + // // Default: 20 OnDemandMaxPricePercentageOverLowestPrice *int64 `locationName:"onDemandMaxPricePercentageOverLowestPrice" type:"integer"` @@ -115747,6 +115787,10 @@ type InstanceRequirements struct { // This parameter is not supported for GetSpotPlacementScores (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) // and GetInstanceTypesFromInstanceRequirements (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html). // + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection + // threshold is applied based on the per-vCPU or per-memory price instead of + // the per-instance price. + // // Default: 100 SpotMaxPricePercentageOverLowestPrice *int64 `locationName:"spotMaxPricePercentageOverLowestPrice" type:"integer"` @@ -115940,7 +115984,7 @@ type InstanceRequirementsRequest struct { // * For instance types with Xilinx devices, specify xilinx. // // Default: Any manufacturer - AcceleratorManufacturers []*string `locationName:"AcceleratorManufacturer" locationNameList:"item" type:"list"` + AcceleratorManufacturers []*string `locationName:"AcceleratorManufacturer" locationNameList:"item" type:"list" enum:"AcceleratorManufacturer"` // The accelerators that must be on the instance type. // @@ -115959,7 +116003,7 @@ type InstanceRequirementsRequest struct { // * For instance types with Xilinx VU9P FPGAs, specify vu9p. 
// // Default: Any accelerator - AcceleratorNames []*string `locationName:"AcceleratorName" locationNameList:"item" type:"list"` + AcceleratorNames []*string `locationName:"AcceleratorName" locationNameList:"item" type:"list" enum:"AcceleratorName"` // The minimum and maximum amount of total accelerator memory, in MiB. // @@ -115975,7 +116019,7 @@ type InstanceRequirementsRequest struct { // * To include instance types with inference hardware, specify inference. // // Default: Any accelerator type - AcceleratorTypes []*string `locationName:"AcceleratorType" locationNameList:"item" type:"list"` + AcceleratorTypes []*string `locationName:"AcceleratorType" locationNameList:"item" type:"list" enum:"AcceleratorType"` // Indicates whether bare metal instance types must be included, excluded, or // required. @@ -116021,7 +116065,7 @@ type InstanceRequirementsRequest struct { // Image (AMI) that you specify in your launch template. // // Default: Any manufacturer - CpuManufacturers []*string `locationName:"CpuManufacturer" locationNameList:"item" type:"list"` + CpuManufacturers []*string `locationName:"CpuManufacturer" locationNameList:"item" type:"list" enum:"CpuManufacturer"` // The instance types to exclude. You can use strings with one or more wild // cards, represented by an asterisk (*), to exclude an instance family, type, @@ -116047,7 +116091,7 @@ type InstanceRequirementsRequest struct { // For previous generation instance types, specify previous. // // Default: Current and previous generation instance types - InstanceGenerations []*string `locationName:"InstanceGeneration" locationNameList:"item" type:"list"` + InstanceGenerations []*string `locationName:"InstanceGeneration" locationNameList:"item" type:"list" enum:"InstanceGeneration"` // Indicates whether instance types with instance store volumes are included, // excluded, or required. For more information, Amazon EC2 instance store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) @@ -116070,7 +116114,7 @@ type InstanceRequirementsRequest struct { // * For instance types with solid state drive (SDD) storage, specify sdd. // // Default: hdd and sdd - LocalStorageTypes []*string `locationName:"LocalStorageType" locationNameList:"item" type:"list"` + LocalStorageTypes []*string `locationName:"LocalStorageType" locationNameList:"item" type:"list" enum:"LocalStorageType"` // The minimum and maximum amount of memory per vCPU, in GiB. // @@ -116100,6 +116144,10 @@ type InstanceRequirementsRequest struct { // This parameter is not supported for GetSpotPlacementScores (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) // and GetInstanceTypesFromInstanceRequirements (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html). // + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection + // threshold is applied based on the per-vCPU or per-memory price instead of + // the per-instance price. + // // Default: 20 OnDemandMaxPricePercentageOverLowestPrice *int64 `type:"integer"` @@ -116123,6 +116171,10 @@ type InstanceRequirementsRequest struct { // This parameter is not supported for GetSpotPlacementScores (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) // and GetInstanceTypesFromInstanceRequirements (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html). 
// + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection + // threshold is applied based on the per-vCPU or per-memory price instead of + // the per-instance price. + // // Default: 100 SpotMaxPricePercentageOverLowestPrice *int64 `type:"integer"` @@ -116317,14 +116369,14 @@ type InstanceRequirementsWithMetadataRequest struct { _ struct{} `type:"structure"` // The architecture type. - ArchitectureTypes []*string `locationName:"ArchitectureType" locationNameList:"item" type:"list"` + ArchitectureTypes []*string `locationName:"ArchitectureType" locationNameList:"item" type:"list" enum:"ArchitectureType"` // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with those attributes. InstanceRequirements *InstanceRequirementsRequest `type:"structure"` // The virtualization type. - VirtualizationTypes []*string `locationName:"VirtualizationType" locationNameList:"item" type:"list"` + VirtualizationTypes []*string `locationName:"VirtualizationType" locationNameList:"item" type:"list" enum:"VirtualizationType"` } // String returns the string representation. @@ -116965,16 +117017,16 @@ type InstanceTypeInfo struct { // The supported boot modes. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) // in the Amazon EC2 User Guide. - SupportedBootModes []*string `locationName:"supportedBootModes" locationNameList:"item" type:"list"` + SupportedBootModes []*string `locationName:"supportedBootModes" locationNameList:"item" type:"list" enum:"BootModeType"` // The supported root device types. - SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"` + SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list" enum:"RootDeviceType"` // Indicates whether the instance type is offered for spot or On-Demand. - SupportedUsageClasses []*string `locationName:"supportedUsageClasses" locationNameList:"item" type:"list"` + SupportedUsageClasses []*string `locationName:"supportedUsageClasses" locationNameList:"item" type:"list" enum:"UsageClassType"` // The supported virtualization types. - SupportedVirtualizationTypes []*string `locationName:"supportedVirtualizationTypes" locationNameList:"item" type:"list"` + SupportedVirtualizationTypes []*string `locationName:"supportedVirtualizationTypes" locationNameList:"item" type:"list" enum:"VirtualizationType"` // Describes the vCPU configurations for the instance type. VCpuInfo *VCpuInfo `locationName:"vCpuInfo" type:"structure"` @@ -118591,7 +118643,7 @@ func (s *IpamResourceTag) SetValue(v string) *IpamResourceTag { // overlap or conflict. // // For more information, see How IPAM works (/vpc/latest/ipam/how-it-works-ipam.html) -// in the Amazon VPC IPAM User Guide +// in the Amazon VPC IPAM User Guide. type IpamScope struct { _ struct{} `type:"structure"` @@ -128189,7 +128241,7 @@ type ModifyTrafficMirrorFilterNetworkServicesInput struct { _ struct{} `type:"structure"` // The network service, for example Amazon DNS, that you want to mirror. - AddNetworkServices []*string `locationName:"AddNetworkService" locationNameList:"item" type:"list"` + AddNetworkServices []*string `locationName:"AddNetworkService" locationNameList:"item" type:"list" enum:"TrafficMirrorNetworkService"` // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. 
If you have @@ -128198,7 +128250,7 @@ type ModifyTrafficMirrorFilterNetworkServicesInput struct { DryRun *bool `type:"boolean"` // The network service, for example Amazon DNS, that you no longer want to mirror. - RemoveNetworkServices []*string `locationName:"RemoveNetworkService" locationNameList:"item" type:"list"` + RemoveNetworkServices []*string `locationName:"RemoveNetworkService" locationNameList:"item" type:"list" enum:"TrafficMirrorNetworkService"` // The ID of the Traffic Mirror filter. // @@ -128317,7 +128369,7 @@ type ModifyTrafficMirrorFilterRuleInput struct { // // When you remove a property from a Traffic Mirror filter rule, the property // is set to the default. - RemoveFields []*string `locationName:"RemoveField" type:"list"` + RemoveFields []*string `locationName:"RemoveField" type:"list" enum:"TrafficMirrorFilterRuleField"` // The action to assign to the rule. RuleAction *string `type:"string" enum:"TrafficMirrorRuleAction"` @@ -128499,7 +128551,7 @@ type ModifyTrafficMirrorSessionInput struct { // // When you remove a property from a Traffic Mirror session, the property is // set to the default. - RemoveFields []*string `locationName:"RemoveField" type:"list"` + RemoveFields []*string `locationName:"RemoveField" type:"list" enum:"TrafficMirrorSessionField"` // The session number determines the order in which sessions are evaluated when // an interface is used by multiple sessions. The first session with a matching @@ -133703,7 +133755,7 @@ type PacketHeaderStatement struct { DestinationPrefixLists []*string `locationName:"destinationPrefixListSet" locationNameList:"item" type:"list"` // The protocols. - Protocols []*string `locationName:"protocolSet" locationNameList:"item" type:"list"` + Protocols []*string `locationName:"protocolSet" locationNameList:"item" type:"list" enum:"Protocol"` // The source addresses. SourceAddresses []*string `locationName:"sourceAddressSet" locationNameList:"item" type:"list"` @@ -133789,7 +133841,7 @@ type PacketHeaderStatementRequest struct { DestinationPrefixLists []*string `locationName:"DestinationPrefixList" locationNameList:"item" type:"list"` // The protocols. - Protocols []*string `locationName:"Protocol" locationNameList:"item" type:"list"` + Protocols []*string `locationName:"Protocol" locationNameList:"item" type:"list" enum:"Protocol"` // The source addresses. SourceAddresses []*string `locationName:"SourceAddress" locationNameList:"item" type:"list"` @@ -134933,7 +134985,7 @@ type PlacementGroupInfo struct { _ struct{} `type:"structure"` // The supported placement group types. - SupportedStrategies []*string `locationName:"supportedStrategies" locationNameList:"item" type:"list"` + SupportedStrategies []*string `locationName:"supportedStrategies" locationNameList:"item" type:"list" enum:"PlacementGroupStrategy"` } // String returns the string representation. @@ -135756,7 +135808,7 @@ type ProcessorInfo struct { _ struct{} `type:"structure"` // The architectures supported by the instance type. - SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list"` + SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list" enum:"ArchitectureType"` // The speed of the processor, in GHz. SustainedClockSpeedInGhz *float64 `locationName:"sustainedClockSpeedInGhz" type:"double"` @@ -138530,7 +138582,9 @@ type ReleaseIpamPoolAllocationInput struct { DryRun *bool `type:"boolean"` // The ID of the allocation. 
- IpamPoolAllocationId *string `type:"string"` + // + // IpamPoolAllocationId is a required field + IpamPoolAllocationId *string `type:"string" required:"true"` // The ID of the IPAM pool which contains the allocation you want to release. // @@ -138562,6 +138616,9 @@ func (s *ReleaseIpamPoolAllocationInput) Validate() error { if s.Cidr == nil { invalidParams.Add(request.NewErrParamRequired("Cidr")) } + if s.IpamPoolAllocationId == nil { + invalidParams.Add(request.NewErrParamRequired("IpamPoolAllocationId")) + } if s.IpamPoolId == nil { invalidParams.Add(request.NewErrParamRequired("IpamPoolId")) } @@ -139680,7 +139737,7 @@ type ReportInstanceStatusInput struct { // * other: [explain using the description parameter] // // ReasonCodes is a required field - ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"` + ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true" enum:"ReportInstanceReasonCodes"` // The time at which the reported instance health state began. StartTime *time.Time `locationName:"startTime" type:"timestamp"` @@ -152926,7 +152983,7 @@ type TrafficMirrorFilter struct { IngressFilterRules []*TrafficMirrorFilterRule `locationName:"ingressFilterRuleSet" locationNameList:"item" type:"list"` // The network service traffic that is associated with the Traffic Mirror filter. - NetworkServices []*string `locationName:"networkServiceSet" locationNameList:"item" type:"list"` + NetworkServices []*string `locationName:"networkServiceSet" locationNameList:"item" type:"list" enum:"TrafficMirrorNetworkService"` // The tags assigned to the Traffic Mirror filter. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index a55357d29..9f05ae2b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -8691,7 +8691,7 @@ type CreateGrantInput struct { // in the Key Management Service Developer Guide. // // Operations is a required field - Operations []*string `type:"list" required:"true"` + Operations []*string `type:"list" required:"true" enum:"GrantOperation"` // The principal that has permission to use the RetireGrant operation to retire // the grant. @@ -12407,7 +12407,7 @@ type GetPublicKeyOutput struct { // // This field appears in the response only when the KeyUsage of the public key // is ENCRYPT_DECRYPT. - EncryptionAlgorithms []*string `type:"list"` + EncryptionAlgorithms []*string `type:"list" enum:"EncryptionAlgorithmSpec"` // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) // of the asymmetric KMS key from which the public key was downloaded. @@ -12436,7 +12436,7 @@ type GetPublicKeyOutput struct { // // This field appears in the response only when the KeyUsage of the public key // is SIGN_VERIFY. - SigningAlgorithms []*string `type:"list"` + SigningAlgorithms []*string `type:"list" enum:"SigningAlgorithmSpec"` } // String returns the string representation. @@ -12603,7 +12603,7 @@ type GrantListEntry struct { Name *string `min:"1" type:"string"` // The list of operations permitted by the grant. - Operations []*string `type:"list"` + Operations []*string `type:"list" enum:"GrantOperation"` // The principal that can retire the grant. 
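The `enum:` list tags being added throughout these hunks are generator metadata only: the fields remain `[]*string`, and callers keep passing plain string pointers, normally through the generated constants for the named enum. A minimal sketch against the KMS hunk above; the principal ARN and key ID are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Operations is a []*string now tagged enum:"GrantOperation"; the
	// generated kms.GrantOperation* constants are the valid member values.
	input := &kms.CreateGrantInput{
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"), // placeholder
		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),       // placeholder
		Operations: aws.StringSlice([]string{
			kms.GrantOperationEncrypt,
			kms.GrantOperationDecrypt,
		}),
	}

	// Client-side validation still only checks required fields, not enum membership.
	fmt.Println(input.Validate())
}
```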
RetiringPrincipal *string `min:"1" type:"string"` @@ -13863,7 +13863,7 @@ type KeyMetadata struct { // key with other encryption algorithms within KMS. // // This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT. - EncryptionAlgorithms []*string `type:"list"` + EncryptionAlgorithms []*string `type:"list" enum:"EncryptionAlgorithmSpec"` // Specifies whether the KMS key's key material expires. This value is present // only when Origin is EXTERNAL, otherwise this value is omitted. @@ -13944,7 +13944,7 @@ type KeyMetadata struct { // key with other signing algorithms within KMS. // // This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY. - SigningAlgorithms []*string `type:"list"` + SigningAlgorithms []*string `type:"list" enum:"SigningAlgorithmSpec"` // The time at which the imported key material expires. When the key material // expires, KMS deletes the key material and the KMS key becomes unusable. This diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 1e7fa6557..718409b54 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -507,8 +507,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // Returns a set of temporary security credentials for users who have been authenticated // in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. +// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID +// Connect-compatible identity provider such as Google or Amazon Cognito federated +// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). // // For mobile applications, we recommend that you use Amazon Cognito. You can // use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide @@ -1537,7 +1538,7 @@ type AssumeRoleInput struct { // the new session inherits any transitive session tags from the calling session. // If you pass a session tag with the same key as an inherited tag, the operation // fails. To view the inherited tags for a session, see the CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []*Tag `type:"list"` @@ -2220,11 +2221,12 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` - // The fully qualified host component of the domain name of the identity provider. + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. 
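The reworked ProviderId documentation reduces to one practical rule: set the field for OAuth 2.0 access tokens (Login with Amazon, Facebook) and omit it for OpenID Connect ID tokens (Google, Cognito). A hedged sketch of the OAuth 2.0 case; the role ARN and session name are placeholders:

```go
package webidentity

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithLoginWithAmazon exchanges a Login with Amazon access token for
// temporary credentials.
func assumeWithLoginWithAmazon(accessToken string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	svc := sts.New(session.Must(session.NewSession()))

	return svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::111122223333:role/WebAppRole"), // placeholder
		RoleSessionName:  aws.String("web-session"),                               // placeholder
		WebIdentityToken: aws.String(accessToken),
		// Set only for OAuth 2.0 access tokens; for OIDC ID tokens this
		// field is omitted entirely.
		ProviderId: aws.String("www.amazon.com"),
	})
}
```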
// // Do not specify this value for OpenID Connect ID tokens. ProviderId *string `min:"4" type:"string"` diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore new file mode 100644 index 000000000..c01141aa4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -0,0 +1,22 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ +**/dependency-reduced-pom.xml + +# Gradle +/.gradle +build/ +*/out/ +*/*/out/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml new file mode 100644 index 000000000..f8d1035cc --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.travis.yml @@ -0,0 +1,28 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +script: + - make go test -v ./...; + diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md new file mode 100644 index 000000000..c1daf6741 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -0,0 +1,117 @@ +# Release (v1.11.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.1 + * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) + +# Release (v1.11.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.0 + * **Feature**: Updates deserialization of header list to supported quoted strings + +# Release (v1.10.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.10.0 + * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. + +# Release (v1.9.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.1 + * **Documentation**: Fixes various typos in Go package documentation. + +# Release (v1.9.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.0 + * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. + * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. + +# Release v1.8.1 + +### Smithy Go Module +* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. + * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) + +# Release v1.8.0 + +### Smithy Go Module + +* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) + * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. 
+ * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) + +# Release v1.7.0 + +### Smithy Go Module +* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) + * Handle error for defer close call +* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) + * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. +* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) + +### Codegen +* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) + * Adds support for Smithy Document shapes and supporting types for protocols to implement support + +# Release v1.6.0 (2021-07-15) + +### Smithy Go Module +* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) + +### Codegen +* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) +* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) + +# Release v1.5.0 (2021-06-25) + +### Smithy Go module +* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) + * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. + +### Codegen +* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) +* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) +* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) +* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) +* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) +* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) + +# Release v1.4.0 (2021-05-06) + +### Smithy Go module +* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) + +### Codegen +* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) +* Add support for httpQueryParams location +* Add support for model renaming conflict resolution with service closure + +# Release v1.3.1 (2021-04-08) + +### Smithy Go module +* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) +* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) + +# Release v1.3.0 (2021-04-01) + +### Smithy Go module +* `transport/http`: Add utility to safely join string to url path, and url raw query. + +### Codegen +* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. + +# Release v1.2.0 (2021-03-12) + +### Smithy Go module +* Fix support for parsing shortened year format in HTTP Date header. 
+* Fix GitHub APIDiff action workflow to get gorelease tool correctly. +* Fix codegen artifact unit test for Go 1.16 + +### Codegen +* Fix generating paginator nil parameter handling before usage. +* Fix Serialize unboxed members decorated as required. +* Add ability to define resolvers at both client construction and operation invocation. +* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..5b627cfa6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md new file mode 100644 index 000000000..c4b6a1c50 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *main* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. 
As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile new file mode 100644 index 000000000..b8c657435 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -0,0 +1,63 @@ +PRE_RELEASE_VERSION ?= + +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +smithy-publish-local: + cd codegen && ./gradlew publishToMavenLocal + +smithy-build: + cd codegen && ./gradlew build + +smithy-clean: + cd codegen && ./gradlew clean + +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run 
${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
+	go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
+	go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
+	go run ${REPOTOOLS_CMD_CHANGELOG} rm -all
+	go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE}
+
+module-version:
+	@go run ${REPOTOOLS_CMD_MODULE_VERSION} .
+
+##############
+# Repo Tools #
+##############
+.PHONY: install-changelog
+
+install-changelog:
+	go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE
new file mode 100644
index 000000000..616fc5889
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
new file mode 100644
index 000000000..789b37889
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -0,0 +1,12 @@
+## Smithy Go
+
+[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
+
+Smithy code generators for Go.
+
+**WARNING: All interfaces are subject to change.**
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 000000000..87b0c74b7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 000000000..dec498c57
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+	UnmarshalDocument(interface{}) error
+	GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 000000000..03055b7a1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 000000000..8f852d95c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+	"fmt"
+	"math/big"
+	"strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int, uint, float, bool, string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+//	// Field is ignored
+//	Field int `document:"-"`
+//
+//	// Field object of key "myName"
+//	Field int `document:"myName"`
+//
+//	// Field object key of key "myName", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:"myName,omitempty"`
+//
+//	// Field object key of "Field", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless any of
+// the following conditions are met.
+//
+//	- the field is not exported
+//	- document field tag is "-"
+//	- document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless the `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+	MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed to by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+//	bool, for boolean values
+//	document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+//	string, for string values
+//	[]interface{}, for array values
+//	map[string]interface{}, for objects
+//	nil, for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
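To make the struct-tag rules above concrete, here is a sketch of a type that carries `document` tags and satisfies Marshaler. The tags are interpreted by generated, protocol-specific serializers; the `Profile` type is invented, and `encoding/json` is used purely as a stand-in byte representation:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Profile demonstrates the tag forms described above.
type Profile struct {
	Name  string `document:"name"`
	Age   int    `document:"age,omitempty"` // dropped when zero
	Debug bool   `document:"-"`             // never marshaled
}

// MarshalSmithyDocument satisfies document.Marshaler. A real implementation
// is generated per protocol and honors the document tags; JSON here only
// illustrates the shape of the output.
func (p Profile) MarshalSmithyDocument() ([]byte, error) {
	out := map[string]interface{}{"name": p.Name}
	if p.Age != 0 {
		out["age"] = p.Age
	}
	return json.Marshal(out)
}

func main() {
	b, err := Profile{Name: "jane"}.MarshalSmithyDocument()
	fmt.Println(string(b), err) // {"name":"jane"} <nil>
}
```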
+type Unmarshaler interface {
+	UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+	noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+	_, ok := x.(noSerde)
+	return ok
+}
+
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+	return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+	return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+	return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+	return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returned as a float64.
+func (n Number) Float32() (float64, error) {
+	return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return n.floatOfBitSize(64)
+}
+
+// floatOfBitSize parses the number as a float of the given bit size.
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+	return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+	f, ok := (&big.Float{}).SetString(string(n))
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+	f, ok := (&big.Int{}).SetString(string(n), 10)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Int")
+	}
+	return f, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 000000000..046a7a765
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+		e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
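Number above is a plain string wrapper, so callers usually try the cheap fixed-width accessor first and fall back to arbitrary precision on overflow; a small sketch using those accessors:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/document"
)

func main() {
	n := document.Number("123456789012345678901234567890")

	// strconv.ParseInt overflows for this value...
	if v, err := n.Int64(); err == nil {
		fmt.Println("int64:", v)
		return
	}

	// ...so fall back to the arbitrary-precision accessor.
	v, err := n.BigInt()
	fmt.Println(v, err)
}
```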
+func (e *InvalidUnmarshalError) Error() string { + var msg string + if e.Type == nil { + msg = "cannot unmarshal to nil value" + } else if e.Type.Kind() != reflect.Ptr { + msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) + } else { + msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) + } + + return fmt.Sprintf("unmarshal failed, %s", msg) +} + +// An UnmarshalError wraps an error that occurred while unmarshaling a +// Smithy document into a Go type. This is different from +// UnmarshalTypeError in that it wraps the underlying error that occurred. +type UnmarshalError struct { + Err error + Value string + Type reflect.Type +} + +// Unwrap returns the underlying unmarshaling error +func (e *UnmarshalError) Unwrap() error { + return e.Err +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *UnmarshalError) Error() string { + return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", + e.Value, e.Type.String(), e.Err) +} + +// An InvalidMarshalError is an error type representing an error +// occurring when marshaling a Go value type. +type InvalidMarshalError struct { + Message string +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *InvalidMarshalError) Error() string { + return fmt.Sprintf("marshal failed, %s", e.Message) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go new file mode 100644 index 000000000..792fdfa08 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/doc.go @@ -0,0 +1,4 @@ +// Package encoding provides utilities for encoding values for specific +// document encodings. + +package encoding diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go new file mode 100644 index 000000000..2fdfb5225 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go @@ -0,0 +1,40 @@ +package encoding + +import ( + "fmt" + "math" + "strconv" +) + +// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol +// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers +// +// Based on encoding/json floatEncoder from the Go Standard Library +// https://golang.org/src/encoding/json/encode.go +func EncodeFloat(dst []byte, v float64, bits int) []byte { + if math.IsInf(v, 0) || math.IsNaN(v) { + panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits))) + } + + abs := math.Abs(v) + fmt := byte('f') + + if abs != 0 { + if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + + dst = strconv.AppendFloat(dst, v, fmt, -1, bits) + + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(dst) + if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { + dst[n-2] = dst[n-1] + dst = dst[:n-1] + } + } + + return dst +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go new file mode 100644 index 000000000..96abd073a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go @@ -0,0 +1,116 @@ +package httpbinding + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + contentLengthHeader = "Content-Length" + floatNaN = "NaN" + floatInfinity = "Infinity" + 
floatNegInfinity = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+	path, rawPath, pathBuffer []byte
+
+	query  url.Values
+	header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in path, query string, and
+// headers. All query and header values will be added on top of the existing
+// values, overwriting duplicate values.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+	parseQuery, err := url.ParseQuery(query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse query string: %w", err)
+	}
+
+	e := &Encoder{
+		path:    []byte(path),
+		rawPath: []byte(path),
+		query:   parseQuery,
+		header:  headers.Clone(),
+	}
+
+	return e, nil
+}
+
+// Encode applies the encoded URI path, query, and headers to the passed in
+// http.Request, and returns it.
+//
+// Because net/http requires `Content-Length` to be specified on
+// http.Request.ContentLength directly, Encode checks whether the header is
+// present, and if so removes it and sets the respective value on http.Request.
+//
+// Returns any error occurring during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+	req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+	req.URL.RawQuery = e.query.Encode()
+
+	// net/http ignores the Content-Length header and requires it to be set on http.Request
+	if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+		iv, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		req.ContentLength = iv
+		e.header.Del(contentLengthHeader)
+	}
+
+	req.Header = e.header
+
+	return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Header used for encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+	return Headers{
+		header: e.header,
+		prefix: strings.TrimSpace(prefix),
+	}
+}
+
+// HasHeader returns if a header with the key specified exists with one or
+// more values.
+func (e Encoder) HasHeader(key string) bool {
+	return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+	return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, true)
+}
+
+// HasQuery returns if a query with the key specified exists with one or
+// more values.
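A sketch of how a generated serializer typically drives this Encoder; the URI template `/items/{itemId}` and the query and header names are invented for illustration:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)

	// The path still contains the {itemId} label from the (hypothetical) URI template.
	encoder, err := httpbinding.NewEncoder("/items/{itemId}", "", http.Header{})
	if err != nil {
		panic(err)
	}

	// Fill the path label, a query parameter, and a header.
	if err := encoder.SetURI("itemId").String("ab cd"); err != nil {
		panic(err)
	}
	encoder.SetQuery("limit").Integer(10)
	encoder.SetHeader("X-Example-Trace").String("on") // hypothetical header

	req, err = encoder.Encode(req)
	fmt.Println(req.URL.RawPath, req.URL.RawQuery, err)
	// /items/ab%20cd limit=10 <nil>
}
```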
+func (e *Encoder) HasQuery(key string) bool {
+	return len(e.query.Get(key)) != 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
new file mode 100644
index 000000000..f9256e175
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
@@ -0,0 +1,122 @@
+package httpbinding
+
+import (
+	"encoding/base64"
+	"math"
+	"math/big"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+	header http.Header
+	prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+	return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+	header http.Header
+	key    string
+	append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+	return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+	if h.append {
+		h.header[h.key] = append(h.header[h.key], value)
+	} else {
+		h.header[h.key] = append(h.header[h.key][:0], value)
+	}
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+	h.modifyHeader(v)
+}
+
+// Byte encodes the value v as a header string value
+func (h HeaderValue) Byte(v int8) {
+	h.Long(int64(v))
+}
+
+// Short encodes the value v as a header string value
+func (h HeaderValue) Short(v int16) {
+	h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+	h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+	h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as a header string value
+func (h HeaderValue) Boolean(v bool) {
+	h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as a header string value
+func (h HeaderValue) Float(v float32) {
+	h.float(float64(v), 32)
+}
+
+// Double encodes the value v as a header string value
+func (h HeaderValue) Double(v float64) {
+	h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+	switch {
+	case math.IsNaN(v):
+		h.String(floatNaN)
+	case math.IsInf(v, 1):
+		h.String(floatInfinity)
+	case math.IsInf(v, -1):
+		h.String(floatNegInfinity)
+	default:
+		h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes the value v as a header string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+	h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as a header string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		h.Long(i)
+		return
+	}
+	h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+	encodeToString := base64.StdEncoding.EncodeToString(v)
+	h.modifyHeader(encodeToString)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go new file mode 100644 index 000000000..e78926c9a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "bytes" + "fmt" +) + +const ( + uriTokenStart = '{' + uriTokenStop = '}' + uriTokenSkip = '+' +) + +func bufCap(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, 0, n) + } + + return b[0:0] +} + +// replacePathElement replaces a single element in the path []byte. +// Escape is used to control whether the value will be escaped using Amazon path escape style. +func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + + start := bytes.Index(path, fieldBuf) + end := start + len(fieldBuf) + if start < 0 || len(path[end:]) == 0 { + // TODO what to do about error? + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) + } + + encodeSep := true + if path[end] == uriTokenSkip { + // '+' token means do not escape slashes + encodeSep = false + end++ + } + + if escape { + val = EscapePath(val, encodeSep) + } + + if path[end] != uriTokenStop { + return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) + } + end++ + + fieldBuf = bufCap(fieldBuf, len(val)) + fieldBuf = append(fieldBuf, val...) + + keyLen := end - start + valLen := len(fieldBuf) + + if keyLen == valLen { + copy(path[start:], fieldBuf) + return path, fieldBuf, nil + } + + newLen := len(path) + (valLen - keyLen) + if len(path) < newLen { + path = path[:cap(path)] + } + if cap(path) < newLen { + newURI := make([]byte, newLen) + copy(newURI, path) + path = newURI + } + + // shift + copy(path[start+valLen:], path[end:]) + path = path[:newLen] + copy(path[start:], fieldBuf) + + return path, fieldBuf, nil +} + +// EscapePath escapes part of a URL path in Amazon style. +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go new file mode 100644 index 000000000..c2e7d0a20 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go @@ -0,0 +1,107 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/url" + "strconv" +) + +// QueryValue is used to encode query key values +type QueryValue struct { + query url.Values + key string + append bool +} + +// NewQueryValue creates a new QueryValue which enables encoding +// a query value into the given url.Values. 
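The `uriTokenSkip` ('+') branch above implements greedy `{label+}` URI parameters, which keep `/` unescaped; EscapePath exposes the same switch directly:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	// Plain {label}: the separator is escaped along with everything else.
	fmt.Println(httpbinding.EscapePath("a b/c", true)) // a%20b%2Fc

	// Greedy {label+}: '/' is passed through untouched.
	fmt.Println(httpbinding.EscapePath("a b/c", false)) // a%20b/c
}
```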
+func NewQueryValue(query url.Values, key string, append bool) QueryValue { + return QueryValue{ + query: query, + key: key, + append: append, + } +} + +func (qv QueryValue) updateKey(value string) { + if qv.append { + qv.query.Add(qv.key, value) + } else { + qv.query.Set(qv.key, value) + } +} + +// Blob encodes v as a base64 query string value +func (qv QueryValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + qv.updateKey(encodeToString) +} + +// Boolean encodes v as a query string value +func (qv QueryValue) Boolean(v bool) { + qv.updateKey(strconv.FormatBool(v)) +} + +// String encodes v as a query string value +func (qv QueryValue) String(v string) { + qv.updateKey(v) +} + +// Byte encodes v as a query string value +func (qv QueryValue) Byte(v int8) { + qv.Long(int64(v)) +} + +// Short encodes v as a query string value +func (qv QueryValue) Short(v int16) { + qv.Long(int64(v)) +} + +// Integer encodes v as a query string value +func (qv QueryValue) Integer(v int32) { + qv.Long(int64(v)) +} + +// Long encodes v as a query string value +func (qv QueryValue) Long(v int64) { + qv.updateKey(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (qv QueryValue) Float(v float32) { + qv.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (qv QueryValue) Double(v float64) { + qv.float(v, 64) +} + +func (qv QueryValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + qv.String(floatNaN) + case math.IsInf(v, 1): + qv.String(floatInfinity) + case math.IsInf(v, -1): + qv.String(floatNegInfinity) + default: + qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (qv QueryValue) BigInteger(v *big.Int) { + qv.updateKey(v.String()) +} + +// BigDecimal encodes v as a query string value +func (qv QueryValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + qv.Long(i) + return + } + qv.updateKey(v.Text('e', -1)) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go new file mode 100644 index 000000000..64e40121e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "math" + "math/big" + "strconv" + "strings" +) + +// URIValue is used to encode named URI parameters +type URIValue struct { + path, rawPath, buffer *[]byte + + key string +} + +func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { + return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} +} + +func (u URIValue) modifyURI(value string) (err error) { + *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) + *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) + return err +} + +// Boolean encodes v as a URI string value +func (u URIValue) Boolean(v bool) error { + return u.modifyURI(strconv.FormatBool(v)) +} + +// String encodes v as a URI string value +func (u URIValue) String(v string) error { + return u.modifyURI(v) +} + +// Byte encodes v as a URI string value +func (u URIValue) Byte(v int8) error { + return u.Long(int64(v)) +} + +// Short encodes v as a URI string value +func (u URIValue) Short(v int16) error { + return u.Long(int64(v)) +} + +// Integer encodes v as a URI string value +func (u URIValue) Integer(v int32) error { + return u.Long(int64(v)) +} + +// Long 
encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+	return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a URI string value
+func (u URIValue) Float(v float32) error {
+	return u.float(float64(v), 32)
+}
+
+// Double encodes v as a URI string value
+func (u URIValue) Double(v float64) error {
+	return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+	switch {
+	case math.IsNaN(v):
+		return u.String(floatNaN)
+	case math.IsInf(v, 1):
+		return u.String(floatInfinity)
+	case math.IsInf(v, -1):
+		return u.String(floatNegInfinity)
+	default:
+		return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a URI string value
+func (u URIValue) BigInteger(v *big.Int) error {
+	return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a URI string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		return u.Long(i)
+	}
+	return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+	queryStart := strings.IndexRune(uri, '?')
+	if queryStart == -1 {
+		path = uri
+		return path, query
+	}
+
+	path = uri[:queryStart]
+	if queryStart+1 >= len(uri) {
+		return path, query
+	}
+	query = uri[queryStart+1:]
+
+	return path, query
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 000000000..508f3c997
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of an XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element and the array start element.
+// It takes in an isFlattened bool, indicating whether the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
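For orientation, here is a small sketch of how these exported helpers behave (illustrative only; the `tag` key and the example URI are made up, not taken from the SDK):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	// QueryValue with append=true uses url.Values.Add, so repeated
	// writes accumulate rather than overwrite.
	q := url.Values{}
	qv := httpbinding.NewQueryValue(q, "tag", true)
	qv.String("a")
	qv.String("b")
	fmt.Println(q.Encode()) // tag=a&tag=b

	// SplitURI separates the path portion from the query portion.
	path, query := httpbinding.SplitURI("/objects/{id}?versions")
	fmt.Println(path, query) // /objects/{id} versions
}
```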
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 000000000..ccee90a63
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 000000000..d6e1e41e1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance to our design to delegate to
+shape serializer function in which a xml.Value will be passed around.
+
+Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#
+
+Member Element
+
+Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member elements
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened elements
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on value such as String(), Long() etc; auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as the wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped maps have the Map() method, which facilitates map entry wrapping.
+By default, wrapped map entries are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as the wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 000000000..ae84e7999
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of the namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// returns true if start element local name is empty
+func (e StartElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// returns true if end element local name is empty
+func (e EndElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 000000000..16fb3dddb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+	// Write takes in a byte slice and returns number of bytes written and error
+	Write(p []byte) (n int, err error)
+
+	// WriteRune takes in a rune and returns number of bytes written and error
+	WriteRune(r rune) (n int, err error)
+
+	// WriteString takes in a string and returns number of bytes written and error
+	WriteString(s string) (n int, err error)
+
+	// String method returns a string
+	String() string
+
+	// Bytes return a byte slice.
+	Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+	w       writer
+	scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding.
+// It writes its start element tag. The value should be closed.
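Pulling the element and encoder pieces together, here is a minimal usage sketch (hypothetical element names; `*bytes.Buffer` satisfies the unexported writer interface above):

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	var buf bytes.Buffer
	encoder := smithyxml.NewEncoder(&buf)

	// RootElement writes the start tag and returns a Value; simple-type
	// methods such as String() auto-close the element.
	root := encoder.RootElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Payload"},
		Attr: []smithyxml.Attr{
			smithyxml.NewNamespaceAttribute("ns", "https://example.com/ns"),
		},
	})
	root.String("hello & goodbye") // text is escaped on write

	// <Payload xmlns:ns="https://example.com/ns">hello &amp; goodbye</Payload>
	fmt.Println(encoder.String())
}
```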
+func (e Encoder) RootElement(element StartElement) Value {
+	return newValue(e.w, e.scratch, element)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 000000000..f3db6ccca
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code    string
+	Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping.
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body with
+// internal <Error></Error> wrapping.
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
new file mode 100644
index 000000000..1c5479af6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+	"unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+
+	// Additional Escapes
+	escNextLine = []byte("&#x85;")
+	escLS       = []byte("&#x2028;")
") +) + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of https://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. +func isInCharacterRange(r rune) (inrange bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +// TODO: When do we need to escape the string? +// Based on encoding/xml escapeString from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeString(e writer, s string) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRuneInString(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.WriteString(s[last : i-width]) + e.Write(esc) + last = i + } + e.WriteString(s[last:]) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. If escapeNewline is true, newline +// characters will be escaped. +// +// Based on encoding/xml escapeText from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeText(e writer, s []byte) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRune(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + // This always escapes newline, which is different than stdlib's optional + // escape of new line. + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.Write(s[last : i-width]) + e.Write(esc) + last = i + } + e.Write(s[last:]) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go new file mode 100644 index 000000000..e42858965 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go @@ -0,0 +1,53 @@ +package xml + +// mapEntryWrapper is the default member wrapper start element for XML Map entry +var mapEntryWrapper = StartElement{ + Name: Name{Local: "entry"}, +} + +// Map represents the encoding of a XML map type +type Map struct { + w writer + scratch *[]byte + + // member start element is the map entry wrapper start element + memberStartElement StartElement + + // isFlattened returns true if the map is a flattened map + isFlattened bool +} + +// newMap returns a map encoder which sets the default map +// entry wrapper to `entry`. +// +// A map `someMap : {{key:"abc", value:"123"}}` is represented as +// `abc123`. +func newMap(w writer, scratch *[]byte) *Map { + return &Map{ + w: w, + scratch: scratch, + memberStartElement: mapEntryWrapper, + } +} + +// newFlattenedMap returns a map encoder which sets the map +// entry wrapper to the passed in memberWrapper`. 
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        true,
+	}
+}
+
+// Entry returns a Value encoder for the map's entry element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+	v := newValue(m.w, m.scratch, m.memberStartElement)
+	v.isFlattened = m.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 000000000..09434b2c0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type.
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+	w       writer
+	scratch *[]byte
+
+	// xml start element is the associated start element for the Value
+	startElement StartElement
+
+	// indicates if the Value represents a flattened shape
+	isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	return Value{
+		w:            w,
+		scratch:      scratch,
+		startElement: startElement,
+	}
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	writeStartElement(w, startElement)
+	return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespaces and attributes in the start element.
+func writeStartElement(w writer, el StartElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml start element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	for _, attr := range el.Attr {
+		w.WriteRune(' ')
+		writeAttribute(w, &attr)
+	}
+
+	w.WriteRune(rightAngleBracket)
+	return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+	// if local, space both are not empty
+	if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+		escapeString(w, attr.Name.Space)
+		w.WriteRune(colon)
+	}
+
+	// if prefix is empty, the default `xmlns` space should be used as prefix.
+	if len(attr.Name.Local) == 0 {
+		attr.Name.Local = attr.Name.Space
+	}
+
+	escapeString(w, attr.Name.Local)
+	w.WriteRune(equals)
+	w.WriteRune(quote)
+	escapeString(w, attr.Value)
+	w.WriteRune(quote)
+}
+
+// writeEndElement takes in an end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml end element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+	w.WriteRune(forwardSlash)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	w.WriteRune(rightAngleBracket)
+
+	return nil
+}
+
+// String encodes v as an XML string.
+// It will auto close the parent xml element tag.
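Before the simple-type methods below, here is how the Map encoder composes in practice (a hypothetical sketch with made-up element names, reusing the `encoder` setup from the earlier example):

```go
// Wrapped map: <SomeMap><entry><key>abc</key><value>123</value></entry></SomeMap>
root := encoder.RootElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "SomeMap"}})
m := root.Map()
entry := m.Entry() // writes <entry>
entry.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "key"}}).String("abc")
entry.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "value"}}).String("123")
entry.Close() // writes </entry>
root.Close()  // writes </SomeMap>
```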
+func (xv Value) String(v string) {
+	escapeString(xv.w, v)
+	xv.Close()
+}
+
+// Byte encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+	xv.Long(int64(v))
+}
+
+// Short encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+	xv.Long(int64(v))
+}
+
+// Integer encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+	xv.Long(int64(v))
+}
+
+// Long encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+	*xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Float encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+	xv.float(float64(v), 32)
+	xv.Close()
+}
+
+// Double encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+	xv.float(v, 64)
+	xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+	*xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+	xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as an XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+	*xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+	xv.Close()
+}
+
+// BigInteger encodes a big.Int v as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+	xv.w.Write([]byte(v.Text(10)))
+	xv.Close()
+}
+
+// BigDecimal encodes a big.Float v as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		xv.Long(i)
+		return
+	}
+
+	xv.w.Write([]byte(v.Text('e', -1)))
+	xv.Close()
+}
+
+// Write writes v directly to the xml document.
+// If escapeXMLText is set to true, Write will escape the text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+	// escape and write xml text
+	if escapeXMLText {
+		escapeText(xv.w, v)
+	} else {
+		// write xml directly
+		xv.w.Write(v)
+	}
+
+	xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// The MemberElement method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+	return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+	v := newFlattenedValue(xv.w, xv.scratch, element)
+	v.isFlattened = true
+	return v
+}
+
+// Array returns an array encoder. By default, the members of the array are
+// wrapped with the `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+	return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; the named start element will be used to wrap xml array entries.
+For example, `<someList><customName>entry1</customName></someList>`.
+Here the `customName` named start element is wrapped around each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+	return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with the `<entry>` element tag.
+
+If value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+	// flattened map
+	if xv.isFlattened {
+		return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+	}
+
+	// un-flattened map
+	return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is a modified copy of the json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+	if v == nil {
+		return
+	}
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+}
+
+// IsFlattened returns true if value is for a flattened shape.
+func (xv Value) IsFlattened() bool {
+	return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+	writeEndElement(xv.w, xv.startElement.End())
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 000000000..dc4eebdff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+)
+
+// NodeDecoder is an XML decoder wrapper that is responsible for decoding
+// a single XML Node element and its nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+	Decoder *xml.Decoder
+	StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized NodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+	return NodeDecoder{
+		Decoder: decoder,
+		StartEl: startEl,
+	}
+}
+
+// Token on a Node Decoder returns an xml.StartElement.
It returns a boolean that indicates whether
+// a token is the node decoder's end node token, and an error which indicates any error
+// that occurred while retrieving the start element.
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+	for {
+		token, e := d.Decoder.Token()
+		if e != nil {
+			return t, done, e
+		}
+
+		// check if we reach end of the node being decoded
+		if el, ok := token.(xml.EndElement); ok {
+			return t, el == d.StartEl.End(), err
+		}
+
+		if t, ok := token.(xml.StartElement); ok {
+			return restoreAttrNamespaces(t), false, err
+		}
+
+		// skip token if it is a comment or preamble or empty space value due to indentation
+		// or if it's a value and is not expected
+	}
+}
+
+// restoreAttrNamespaces updates XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+	if len(node.Attr) == 0 {
+		return node
+	}
+
+	// Generate a mapping of XML namespace values to their short names.
+	ns := map[string]string{}
+	for _, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+			break
+		}
+	}
+
+	for i, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			continue
+		}
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had
+		// <foo xmlns:bar="baz" bar:bin="hi"/>,
+		// then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+		// continue to resolve as `bar` so we can easily identify it later on.
+		if v, ok := ns[node.Attr[i].Name.Space]; ok {
+			node.Attr[i].Name.Space = v
+		}
+	}
+	return node
+}
+
+// GetElement looks for the given tag name at the current level, and returns the element if found,
+// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+	for {
+		token, done, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		if done {
+			return t, fmt.Errorf("%s node not found", name)
+		}
+		switch {
+		case strings.EqualFold(name, token.Name.Local):
+			return token, nil
+		default:
+			err = d.Decoder.Skip()
+			if err != nil {
+				return t, err
+			}
+		}
+	}
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+	t, e := d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	endElement := d.StartEl.End()
+
+	switch ev := t.(type) {
+	case xml.CharData:
+		c = ev.Copy()
+	case xml.EndElement: // end tag or self-closing
+		if ev == endElement {
+			return []byte{}, err
+		}
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	default:
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	}
+
+	t, e = d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	if ev, ok := t.(xml.EndElement); ok {
+		if ev == endElement {
+			return c, err
+		}
+	}
+
+	return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response and ignore the +// comments and preamble +func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) { + for { + t, e := decoder.Token() + if e != nil { + return startElement, e + } + + if startElement, ok := t.(xml.StartElement); ok { + return startElement, err + } + } +} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go new file mode 100644 index 000000000..d6948d020 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/errors.go @@ -0,0 +1,137 @@ +package smithy + +import "fmt" + +// APIError provides the generic API and protocol agnostic error type all SDK +// generated exception types will implement. +type APIError interface { + error + + // ErrorCode returns the error code for the API exception. + ErrorCode() string + // ErrorMessage returns the error message for the API exception. + ErrorMessage() string + // ErrorFault returns the fault for the API exception. + ErrorFault() ErrorFault +} + +// GenericAPIError provides a generic concrete API error type that SDKs can use +// to deserialize error responses into. Should be used for unmodeled or untyped +// errors. +type GenericAPIError struct { + Code string + Message string + Fault ErrorFault +} + +// ErrorCode returns the error code for the API exception. +func (e *GenericAPIError) ErrorCode() string { return e.Code } + +// ErrorMessage returns the error message for the API exception. +func (e *GenericAPIError) ErrorMessage() string { return e.Message } + +// ErrorFault returns the fault for the API exception. +func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } + +func (e *GenericAPIError) Error() string { + return fmt.Sprintf("api error %s: %s", e.Code, e.Message) +} + +var _ APIError = (*GenericAPIError)(nil) + +// OperationError decorates an underlying error which occurred while invoking +// an operation with names of the operation and API. +type OperationError struct { + ServiceID string + OperationName string + Err error +} + +// Service returns the name of the API service the error occurred with. +func (e *OperationError) Service() string { return e.ServiceID } + +// Operation returns the name of the API operation the error occurred with. +func (e *OperationError) Operation() string { return e.OperationName } + +// Unwrap returns the nested error if any, or nil. +func (e *OperationError) Unwrap() error { return e.Err } + +func (e *OperationError) Error() string { + return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) +} + +// DeserializationError provides a wrapper for an error that occurs during +// deserialization. +type DeserializationError struct { + Err error // original error + Snapshot []byte +} + +// Error returns a formatted error for DeserializationError +func (e *DeserializationError) Error() string { + const msg = "deserialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s, %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in DeserializationError +func (e *DeserializationError) Unwrap() error { return e.Err } + +// ErrorFault provides the type for a Smithy API error fault. 
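To make the error types concrete, here is a hedged sketch of how they compose with the standard library's error unwrapping (the service and operation names are hypothetical):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/smithy-go"
)

func main() {
	// An operation error wrapping a generic, unmodeled API error.
	err := &smithy.OperationError{
		ServiceID:     "ExampleService",
		OperationName: "GetWidget",
		Err: &smithy.GenericAPIError{
			Code:    "AccessDenied",
			Message: "not allowed",
			Fault:   smithy.FaultClient,
		},
	}

	// errors.As walks Unwrap to find the underlying APIError.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		fmt.Println(apiErr.ErrorCode(), apiErr.ErrorFault()) // AccessDenied client
	}
}
```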
+type ErrorFault int + +// ErrorFault enumeration values +const ( + FaultUnknown ErrorFault = iota + FaultServer + FaultClient +) + +func (f ErrorFault) String() string { + switch f { + case FaultServer: + return "server" + case FaultClient: + return "client" + default: + return "unknown" + } +} + +// SerializationError represents an error that occurred while attempting to serialize a request +type SerializationError struct { + Err error // original error +} + +// Error returns a formatted error for SerializationError +func (e *SerializationError) Error() string { + const msg = "serialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s: %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in SerializationError +func (e *SerializationError) Unwrap() error { return e.Err } + +// CanceledError is the error that will be returned by an API request that was +// canceled. API operations given a Context may return this error when +// canceled. +type CanceledError struct { + Err error +} + +// CanceledError returns true to satisfy interfaces checking for canceled errors. +func (*CanceledError) CanceledError() bool { return true } + +// Unwrap returns the underlying error, if there was one. +func (e *CanceledError) Unwrap() error { + return e.Err +} + +func (e *CanceledError) Error() string { + return fmt.Sprintf("canceled, %v", e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go new file mode 100644 index 000000000..c1e70fc68 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package smithy + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.11.1" diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go new file mode 100644 index 000000000..f8417c15b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/byte.go @@ -0,0 +1,12 @@ +package io + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go new file mode 100644 index 000000000..a6a33eaf5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/doc.go @@ -0,0 +1,2 @@ +// Package io provides utilities for Smithy generated API clients. +package io diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go new file mode 100644 index 000000000..07063f296 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/reader.go @@ -0,0 +1,16 @@ +package io + +import ( + "io" +) + +// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method +// that does nothing. +type ReadSeekNopCloser struct { + io.ReadSeeker +} + +// Close does nothing. +func (ReadSeekNopCloser) Close() error { + return nil +} diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go new file mode 100644 index 000000000..06b476add --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go @@ -0,0 +1,94 @@ +package io + +import ( + "bytes" + "io" +) + +// RingBuffer struct satisfies io.ReadWrite interface. 
+//
+// RingBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+	slice []byte
+	start int
+	end   int
+	size  int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+	ringBuf := RingBuffer{
+		slice: slice,
+	}
+	return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, increment the start index
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+	// readCount keeps track of the number of bytes read
+	var readCount int
+	for j := 0; j < len(p); j++ {
+		// if ring buffer is empty or completely read
+		// return EOF error.
+		if r.size == 0 {
+			return readCount, io.EOF
+		}
+
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+
+		p[j] = r.slice[r.start]
+		readCount++
+		// increment the start pointer for ring buffer
+		r.start++
+		// decrement the size of ring buffer
+		r.size--
+	}
+	return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
+func (r *RingBuffer) Len() int {
+	return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+	var b bytes.Buffer
+	io.Copy(&b, &r)
+	return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+	*r = RingBuffer{
+		slice: r.slice,
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
new file mode 100644
index 000000000..800bf3769
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+	echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+	exit 1
+}
+
+while getopts "hs:d:" options; do
+	case "${options}" in
+	s)
+		SMITHY_SOURCE_DIR=${OPTARG}
+		if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+			echo "path to smithy-go source directory is required" || exit
+			usage
+		fi
+		;;
+	d)
+		PROJECT_DIR=${OPTARG}
+		;;
+	h)
+		usage
+		;;
+	*)
+		usage
+		;;
+	esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+	cd $PROJECT_DIR || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+	repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+	echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 000000000..2071924bd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+	"context"
+	"io"
+	"log"
+)
+
+// Classification is the type of the log entry's classification name.
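As a quick illustration of the Classification/Logger pairing defined below (the message text is made up; the timestamp shown depends on when the program runs):

```go
package main

import (
	"os"

	"github.com/aws/smithy-go/logging"
)

func main() {
	logger := logging.NewStandardLogger(os.Stderr)
	// Emits something like: SDK 2022/03/01 12:00:00 WARN retrying request, attempt 2
	logger.Logf(logging.Warn, "retrying request, attempt %d", 2)
}
```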
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+	Warn  Classification = "WARN"
+	Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+	// Logf is expected to support the standard fmt package "verbs".
+	Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+	f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+	WithContext(context.Context) Logger
+}
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case, if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+	if logger == nil {
+		return Nop{}
+	}
+
+	cl, ok := logger.(ContextLogger)
+	if !ok {
+		return logger
+	}
+
+	return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+	return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its
+// Printf method.
+type StandardLogger struct {
+	Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+	if len(classification) != 0 {
+		format = string(classification) + " " + format
+	}
+
+	s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+	return &StandardLogger{
+		Logger: log.New(writer, "SDK ", log.LstdFlags),
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 000000000..9858928a7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into an
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error that stacks above can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to the front or back of a step, or relative, by name, to an
+// existing middleware in that step. If a middleware does not have a name, a
+// unique name will be generated for the middleware when it is added to the step.
+//
+//	// Create middleware stack
+//	stack := middleware.NewStack()
+//
+//	// Add middleware to stack steps
+//	stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+//	stack.Serialize.Add(marshalOperationFoo, middleware.After)
+//	stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+//	// Invoke middleware on handler.
+//	resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go
new file mode 100644
index 000000000..c2f0dbb6b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/logging.go
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context value key with which the logger is associated.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context, a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
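The functions below form the two halves of that flow. A short sketch of the round trip through the context (the endpoint string is illustrative):

```go
// Attach a logger once, near the top of an operation.
ctx := middleware.SetLogger(context.Background(), logging.NewStandardLogger(os.Stdout))

// Any middleware can later retrieve it without explicit plumbing; a nil or
// missing logger degrades to logging.Nop.
middleware.GetLogger(ctx).Logf(logging.Debug, "resolved endpoint %s", "https://example.invalid")
```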
+func GetLogger(ctx context.Context) logging.Logger { + logger, ok := ctx.Value(loggerKey{}).(logging.Logger) + if !ok || logger == nil { + return logging.Nop{} + } + + return logging.WithContext(ctx, logger) +} + +// SetLogger sets the provided logger value on the provided ctx. +func SetLogger(ctx context.Context, logger logging.Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +type setLogger struct { + Logger logging.Logger +} + +// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. +func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { + return stack.Initialize.Add(&setLogger{Logger: logger}, After) +} + +func (a *setLogger) ID() string { + return "SetLogger" +} + +func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return next.HandleInitialize(SetLogger(ctx, a.Logger), in) +} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go new file mode 100644 index 000000000..7bb7dbcf5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go @@ -0,0 +1,65 @@ +package middleware + +// MetadataReader provides an interface for reading metadata from the +// underlying metadata container. +type MetadataReader interface { + Get(key interface{}) interface{} +} + +// Metadata provides storing and reading metadata values. Keys may be any +// comparable value type. Get and set will panic if key is not a comparable +// value type. +// +// Metadata uses lazy initialization, and Set method must be called as an +// addressable value, or pointer. Not doing so may cause key/value pair to not +// be set. +type Metadata struct { + values map[interface{}]interface{} +} + +// Get attempts to retrieve the value the key points to. Returns nil if the +// key was not found. +// +// Panics if key type is not comparable. +func (m Metadata) Get(key interface{}) interface{} { + return m.values[key] +} + +// Clone creates a shallow copy of Metadata entries, returning a new Metadata +// value with the original entries copied into it. +func (m Metadata) Clone() Metadata { + vs := make(map[interface{}]interface{}, len(m.values)) + for k, v := range m.values { + vs[k] = v + } + + return Metadata{ + values: vs, + } +} + +// Set stores the value pointed to by the key. If a value already exists at +// that key it will be replaced with the new value. +// +// Set method must be called as an addressable value, or pointer. If Set is not +// called as an addressable value or pointer, the key value pair being set may +// be lost. +// +// Panics if the key type is not comparable. +func (m *Metadata) Set(key, value interface{}) { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } + m.values[key] = value +} + +// Has returns whether the key exists in the metadata. +// +// Panics if the key type is not comparable. 
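A short sketch of the addressability requirement called out above (the key type is hypothetical):

```go
type requestIDKey struct{} // unexported comparable key type

var md middleware.Metadata // addressable, so Set works as documented
md.Set(requestIDKey{}, "req-1234")

if id, ok := md.Get(requestIDKey{}).(string); ok {
	fmt.Println(id, md.Has(requestIDKey{})) // req-1234 true
}
```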
+func (m Metadata) Has(key interface{}) bool {
+	if m.values == nil {
+		return false
+	}
+	_, ok := m.values[key]
+	return ok
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 000000000..803b7c751
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+	// Handle performs logic to obtain an output for the given input. Handler
+	// should be decorated with middleware to perform input specific behavior.
+	Handle(ctx context.Context, input interface{}) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+	// ID provides a unique identifier for the middleware.
+	ID() string
+
+	// HandleMiddleware performs the middleware's handling of the input, returning the output,
+	// or error. The middleware can invoke the next Handler if handling should
+	// continue.
+	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// decoratedHandler wraps a middleware in order to call the next handler in
+// the chain.
+type decoratedHandler struct {
+	// The next handler to be called.
+	Next Handler
+
+	// The current middleware decorating the handler.
+	With Middleware
+}
+
+// Handle implements the Handler interface to handle an operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return m.With.HandleMiddleware(ctx, input, m.Next)
+}
+
+// DecorateHandler decorates a handler with middleware, wrapping the handler
+// with the middleware.
+func DecorateHandler(h Handler, with ...Middleware) Handler {
+	for i := len(with) - 1; i >= 0; i-- {
+		h = decoratedHandler{
+			Next: h,
+			With: with[i],
+		}
+	}
+
+	return h
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
new file mode 100644
index 000000000..4b195308c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
@@ -0,0 +1,268 @@
+package middleware
+
+import "fmt"
+
+// RelativePosition provides specifying the relative position of a middleware
+// in an ordered group.
+type RelativePosition int
+
+// Relative position for middleware in steps.
+const (
+	After RelativePosition = iota
+	Before
+)
+
+type ider interface {
+	ID() string
+}
+
+// orderedIDs provides an ordered collection of items with relative ordering
+// by name.
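orderedIDs is the machinery behind the step Add/Insert APIs. A sketch of how it surfaces to callers (this assumes the `InitializeMiddlewareFunc` helper and step types defined elsewhere in this vendored package; the IDs are made up):

```go
stack := middleware.NewStack("GetWidget", func() interface{} { return nil })

logInput := middleware.InitializeMiddlewareFunc("LogInput",
	func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
		middleware.InitializeOutput, middleware.Metadata, error,
	) {
		middleware.GetLogger(ctx).Logf(logging.Debug, "input: %v", in.Parameters)
		return next.HandleInitialize(ctx, in)
	})

// Add to the back of the Initialize step; Insert/Swap/Remove address
// middleware relative to these IDs.
_ = stack.Initialize.Add(logInput, middleware.After)
```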
+type orderedIDs struct { + order *relativeOrder + items map[string]ider +} + +const baseOrderedItems = 5 + +func newOrderedIDs() *orderedIDs { + return &orderedIDs{ + order: newRelativeOrder(), + items: make(map[string]ider, baseOrderedItems), + } +} + +// Add injects the item to the relative position of the item group. Returns an +// error if the item already exists. +func (g *orderedIDs) Add(m ider, pos RelativePosition) error { + id := m.ID() + if len(id) == 0 { + return fmt.Errorf("empty ID, ID must not be empty") + } + + if err := g.order.Add(pos, id); err != nil { + return err + } + + g.items[id] = m + return nil +} + +// Insert injects the item relative to an existing item id. Returns an error if +// the original item does not exist, or the item being added already exists. +func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { + if len(m.ID()) == 0 { + return fmt.Errorf("insert ID must not be empty") + } + if len(relativeTo) == 0 { + return fmt.Errorf("relative to ID must not be empty") + } + + if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { + return err + } + + g.items[m.ID()] = m + return nil +} + +// Get returns the ider identified by id. If ider is not present, returns false. +func (g *orderedIDs) Get(id string) (ider, bool) { + v, ok := g.items[id] + return v, ok +} + +// Swap removes the item by id, replacing it with the new item. Returns an error +// if the original item doesn't exist. +func (g *orderedIDs) Swap(id string, m ider) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("swap from ID must not be empty") + } + + iderID := m.ID() + if len(iderID) == 0 { + return nil, fmt.Errorf("swap to ID must not be empty") + } + + if err := g.order.Swap(id, iderID); err != nil { + return nil, err + } + + removed := g.items[id] + + delete(g.items, id) + g.items[iderID] = m + + return removed, nil +} + +// Remove removes the item by id. Returns an error if the item +// doesn't exist. +func (g *orderedIDs) Remove(id string) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("remove ID must not be empty") + } + + if err := g.order.Remove(id); err != nil { + return nil, err + } + + removed := g.items[id] + delete(g.items, id) + return removed, nil +} + +func (g *orderedIDs) List() []string { + items := g.order.List() + order := make([]string, len(items)) + copy(order, items) + return order +} + +// Clear removes all entries and slots. +func (g *orderedIDs) Clear() { + g.order.Clear() + g.items = map[string]ider{} +} + +// GetOrder returns the item in the order it should be invoked in. +func (g *orderedIDs) GetOrder() []interface{} { + order := g.order.List() + ordered := make([]interface{}, len(order)) + for i := 0; i < len(order); i++ { + ordered[i] = g.items[order[i]] + } + + return ordered +} + +// relativeOrder provides ordering of item +type relativeOrder struct { + order []string +} + +func newRelativeOrder() *relativeOrder { + return &relativeOrder{ + order: make([]string, 0, baseOrderedItems), + } +} + +// Add inserts an item into the order relative to the position provided. +func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + switch pos { + case Before: + return s.insert(0, Before, ids...) + + case After: + s.order = append(s.order, ids...) 
+ + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +// Insert injects an item before or after the relative item. Returns +// an error if the relative item does not exist. +func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + i, ok := s.has(relativeTo) + if !ok { + return fmt.Errorf("not found, %v", relativeTo) + } + + return s.insert(i, pos, ids...) +} + +// Swap will replace the item id with the to item. Returns an +// error if the original item id does not exist. Allows swapping out an +// item for another item with the same id. +func (s *relativeOrder) Swap(id, to string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + if _, ok = s.has(to); ok && id != to { + return fmt.Errorf("already exists, %v", to) + } + + s.order[i] = to + return nil +} + +func (s *relativeOrder) Remove(id string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + s.order = append(s.order[:i], s.order[i+1:]...) + return nil +} + +func (s *relativeOrder) List() []string { + return s.order +} + +func (s *relativeOrder) Clear() { + s.order = s.order[0:0] +} + +func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { + switch pos { + case Before: + n := len(ids) + var src []string + if n <= cap(s.order)-len(s.order) { + s.order = s.order[:len(s.order)+n] + src = s.order + } else { + src = s.order + s.order = make([]string, len(s.order)+n) + copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half + } + copy(s.order[i+n:], src[i:]) + copy(s.order[i:], ids) + case After: + if i == len(s.order)-1 || len(s.order) == 0 { + s.order = append(s.order, ids...) + } else { + s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) + } + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +func (s *relativeOrder) has(id string) (i int, found bool) { + for i := 0; i < len(s.order); i++ { + if s.order[i] == id { + return i, true + } + } + return 0, false +} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go new file mode 100644 index 000000000..45ccb5b93 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/stack.go @@ -0,0 +1,209 @@ +package middleware + +import ( + "context" + "io" + "strings" +) + +// Stack provides protocol and transport agnostic set of middleware split into +// distinct steps. Steps have specific transitions between them, that are +// managed by the individual step. +// +// Steps are composed as middleware around the underlying handler in the +// following order: +// +// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler +// +// Any middleware within the chain may choose to stop and return an error or +// response. Since the middleware decorate the handler like a call stack, each +// middleware will receive the result of the next middleware in the chain. +// Middleware that does not need to react to an input, or result must forward +// along the input down the chain, or return the result back up the chain. +// +// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler +type Stack struct { + // Initialize prepares the input, and sets any default parameters as + // needed, (e.g. 
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go
new file mode 100644
index 000000000..45ccb5b93
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack.go
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+	"context"
+	"io"
+	"strings"
+)
+
+// Stack provides a protocol- and transport-agnostic set of middleware split into
+// distinct steps. Steps have specific transitions between them that are
+// managed by the individual step.
+//
+// Steps are composed as middleware around the underlying handler in the
+// following order:
+//
+//	Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+//
+// Any middleware within the chain may choose to stop and return an error or
+// response. Since the middleware decorate the handler like a call stack, each
+// middleware will receive the result of the next middleware in the chain.
+// Middleware that does not need to react to an input or result must forward
+// the input down the chain, or return the result back up the chain.
+//
+//	Initialize <- Serialize <- Build <- Finalize <- Deserialize <- Handler
+type Stack struct {
+	// Initialize prepares the input, and sets any default parameters as
+	// needed (e.g. idempotency token, and presigned URLs).
+	//
+	// Takes Input Parameters, and returns result or error.
+	//
+	// Receives result or error from Serialize step.
+	Initialize *InitializeStep
+
+	// Serialize serializes the prepared input into a data structure that can be consumed
+	// by the target transport's message (e.g. REST-JSON serialization).
+	//
+	// Converts Input Parameters into a Request, and returns the result or error.
+	//
+	// Receives result or error from Build step.
+	Serialize *SerializeStep
+
+	// Build adds additional metadata to the serialized transport message
+	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+	// modifications to the message should be copied to all message attempts.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Finalize step.
+	Build *BuildStep
+
+	// Finalize performs final preparations needed before sending the message. The
+	// message should already be complete by this stage, and is only altered
+	// to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+	// request signing).
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Deserialize step.
+	Finalize *FinalizeStep
+
+	// Deserialize reacts to the handler's response returned by the recipient of the request
+	// message. Deserializes the response into a structured type or error that
+	// middleware above can react to.
+	//
+	// Should only forward Request to underlying handler.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives raw response, or error from underlying handler.
+	Deserialize *DeserializeStep
+
+	id string
+}
+
+// NewStack returns an initialized, empty stack.
+func NewStack(id string, newRequestFn func() interface{}) *Stack {
+	return &Stack{
+		id:          id,
+		Initialize:  NewInitializeStep(),
+		Serialize:   NewSerializeStep(newRequestFn),
+		Build:       NewBuildStep(),
+		Finalize:    NewFinalizeStep(),
+		Deserialize: NewDeserializeStep(),
+	}
+}
+
+// ID returns the unique ID for the stack as a middleware.
+func (s *Stack) ID() string { return s.id }
+
+// HandleMiddleware invokes the middleware stack decorating the next handler.
+// Each step of the stack is invoked in order before calling the next step,
+// with the next handler called last.
+//
+// The input value must be the input parameters of the operation being
+// performed.
+//
+// Will return the result of the operation, or error.
+func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+	output interface{}, metadata Metadata, err error,
+) {
+	h := DecorateHandler(next,
+		s.Initialize,
+		s.Serialize,
+		s.Build,
+		s.Finalize,
+		s.Deserialize,
+	)
+
+	return h.Handle(ctx, input)
+}
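To make the decoration order concrete, here is an editorial, self-contained sketch of driving a Stack end to end. It is not part of the vendored code: the map standing in for a transport request and the "trace" middleware ID are invented, and middleware.HandlerFunc is assumed from this same package even though it does not appear in this diff.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func main() {
	// newRequestFn is called by the Serialize step to create the transport
	// message; a plain map stands in for a real HTTP request here.
	stack := middleware.NewStack("example", func() interface{} {
		return map[string]string{}
	})

	// An Initialize middleware runs first, before all the steps below it.
	err := stack.Initialize.Add(middleware.InitializeMiddlewareFunc("trace",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			fmt.Println("initialize:", in.Parameters)
			return next.HandleInitialize(ctx, in)
		}), middleware.After)
	if err != nil {
		panic(err)
	}

	// The terminal handler sits at the bottom of the decorated call stack and
	// receives the request created by the Serialize step.
	handler := middleware.HandlerFunc(func(ctx context.Context, input interface{}) (
		interface{}, middleware.Metadata, error,
	) {
		fmt.Println("handler:", input)
		return "result", middleware.Metadata{}, nil
	})

	out, _, err := stack.HandleMiddleware(context.Background(), "params", handler)
	fmt.Println(out, err) // result <nil>
}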
+
+// List returns a list of all middleware in the stack by step.
+func (s *Stack) List() []string {
+	var l []string
+	l = append(l, s.id)
+
+	l = append(l, s.Initialize.ID())
+	l = append(l, s.Initialize.List()...)
+
+	l = append(l, s.Serialize.ID())
+	l = append(l, s.Serialize.List()...)
+
+	l = append(l, s.Build.ID())
+	l = append(l, s.Build.List()...)
+
+	l = append(l, s.Finalize.ID())
+	l = append(l, s.Finalize.List()...)
+
+	l = append(l, s.Deserialize.ID())
+	l = append(l, s.Deserialize.List()...)
+
+	return l
+}
+
+func (s *Stack) String() string {
+	var b strings.Builder
+
+	w := &indentWriter{w: &b}
+
+	w.WriteLine(s.id)
+	w.Push()
+
+	writeStepItems(w, s.Initialize)
+	writeStepItems(w, s.Serialize)
+	writeStepItems(w, s.Build)
+	writeStepItems(w, s.Finalize)
+	writeStepItems(w, s.Deserialize)
+
+	return b.String()
+}
+
+type stackStepper interface {
+	ID() string
+	List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+	type lister interface {
+		List() []string
+	}
+
+	w.WriteLine(s.ID())
+	w.Push()
+
+	defer w.Pop()
+
+	// Ignore the stack itself to prevent circular iteration.
+	if _, ok := s.(*Stack); ok {
+		return
+	}
+
+	for _, id := range s.List() {
+		w.WriteLine(id)
+	}
+}
+
+type stringWriter interface {
+	io.Writer
+	WriteString(string) (int, error)
+	WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+	w     stringWriter
+	depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+	w.depth++
+}
+
+func (w *indentWriter) Pop() {
+	w.depth--
+	if w.depth < 0 {
+		w.depth = 0
+	}
+}
+
+func (w *indentWriter) WriteLine(v string) {
+	w.w.WriteString(indentDepth[:w.depth])
+
+	v = strings.ReplaceAll(v, "\n", "\\n")
+	v = strings.ReplaceAll(v, "\r", "\\r")
+
+	w.w.WriteString(v)
+	w.w.WriteRune('\n')
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
new file mode 100644
index 000000000..ef96009ba
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+	"context"
+	"reflect"
+	"strings"
+)
+
+// WithStackValue adds a key-value pair to the context that is intended to be
+// scoped to a stack. Use ClearStackValues to get a new context with all stack
+// values cleared.
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+	md = withStackValue(md, key, value)
+	return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+	return context.WithValue(ctx, stackValuesKey{}, nil)
+}
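A short editorial sketch of the stack-values API defined in this file; the unexported ctxKey type is an assumption of the example, following the usual context-key pattern, and is not part of the vendored code.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// ctxKey is an unexported comparable type, the usual pattern for context keys.
type ctxKey struct{}

func main() {
	ctx := context.Background()

	// Stack values read like context values, but can all be dropped in a
	// single call between operation invocations.
	ctx = middleware.WithStackValue(ctx, ctxKey{}, "attempt-1")
	fmt.Println(middleware.GetStackValue(ctx, ctxKey{})) // attempt-1

	ctx = middleware.ClearStackValues(ctx)
	fmt.Println(middleware.GetStackValue(ctx, ctxKey{})) // <nil>
}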
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+	if md == nil {
+		return nil
+	}
+
+	return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+	key    interface{}
+	value  interface{}
+	parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+	if key == nil {
+		panic("nil key")
+	}
+	if !reflect.TypeOf(key).Comparable() {
+		panic("key is not comparable")
+	}
+	return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+	if key == m.key {
+		return m.value
+	}
+
+	if m.parent == nil {
+		return nil
+	}
+
+	return m.parent.Value(key)
+}
+
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	cc := c
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+
+	return str.String()
+}
+
+type stringer interface {
+	String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want this package depending on the unicode tables. This is only used by
+// *stackValues.String().
+func stringify(v interface{}) string {
+	switch s := v.(type) {
+	case stringer:
+		return s.String()
+	case string:
+		return s
+	}
+	return ""
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 000000000..7e1d94cae
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+	"context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+	Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+	Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+	HandleBuild(ctx context.Context, in BuildInput) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// build step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// ID returns a unique ID for the middleware in the BuildStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleBuild invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided,
+// and the func to be invoked.
+func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware {
+	return buildMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type buildMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)
+}
+
+// ID returns the unique ID for the middleware.
+func (s buildMiddlewareFunc) ID() string { return s.id }
+
+// HandleBuild invokes the middleware Fn.
+func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ BuildMiddleware = (buildMiddlewareFunc{})
+
+// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on
+// a handler.
+type BuildStep struct {
+	ids *orderedIDs
+}
+
+// NewBuildStep returns a BuildStep ready to have middleware for
+// initialization added to it.
+func NewBuildStep() *BuildStep {
+	return &BuildStep{
+		ids: newOrderedIDs(),
+	}
+}
+
+var _ Middleware = (*BuildStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *BuildStep) ID() string {
+	return "Build stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h BuildHandler = buildWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedBuildHandler{
+			Next: h,
+			With: order[i].(BuildMiddleware),
+		}
+	}
+
+	sIn := BuildInput{
+		Request: in,
+	}
+
+	res, metadata, err := h.HandleBuild(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *BuildStep) Get(id string) (BuildMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(BuildMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns an error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or an error if the middleware to be removed
+// doesn't exist.
+func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *BuildStep) List() []string {
+	return s.ids.List()
+}
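To show the wrap/decorate machinery that follows, an editorial sketch of a BuildStep run on its own. It is not part of the vendored code: the map standing in for a transport request and the "stamp" ID are invented, and middleware.HandlerFunc is assumed from this same package even though it does not appear in this diff.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func main() {
	step := middleware.NewBuildStep()

	// A build middleware decorates the serialized message; here it stamps a
	// header-like entry onto the stand-in request before delegating onward.
	err := step.Add(middleware.BuildMiddlewareFunc("stamp", func(
		ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
	) (middleware.BuildOutput, middleware.Metadata, error) {
		if req, ok := in.Request.(map[string]string); ok {
			req["User-Agent"] = "example/1.0"
		}
		return next.HandleBuild(ctx, in)
	}), middleware.After)
	if err != nil {
		panic(err)
	}

	// Running the step directly as a Middleware: buildWrapHandler (below)
	// adapts the terminal handler, which echoes the request back as the result.
	out, _, err := step.HandleMiddleware(context.Background(), map[string]string{},
		middleware.HandlerFunc(func(ctx context.Context, input interface{}) (
			interface{}, middleware.Metadata, error,
		) {
			return input, middleware.Metadata{}, nil
		}))
	fmt.Println(out, err) // map[User-Agent:example/1.0] <nil>
}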
+
+// Clear removes all middleware in the step.
+func (s *BuildStep) Clear() {
+	s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+	Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// HandleBuild implements BuildHandler, converts types and delegates to the
+// underlying generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return BuildOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedBuildHandler struct {
+	Next BuildHandler
+	With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+	return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 000000000..448607215
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+	"context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeMiddleware to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+	Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+	RawResponse interface{}
+	Result      interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+	HandleDeserialize(ctx context.Context, in DeserializeInput) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { + return deserializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type deserializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, DeserializeInput, DeserializeHandler) ( + DeserializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s deserializeMiddlewareFunc) ID() string { return s.id } + +// HandleDeserialize invokes the middleware Fn. +func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) + +// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be +// invoked on a handler. +type DeserializeStep struct { + ids *orderedIDs +} + +// NewDeserializeStep returns a DeserializeStep ready to have middleware for +// initialization added to it. +func NewDeserializeStep() *DeserializeStep { + return &DeserializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*DeserializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *DeserializeStep) ID() string { + return "Deserialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h DeserializeHandler = deserializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedDeserializeHandler{ + Next: h, + With: order[i].(DeserializeMiddleware), + } + } + + sIn := DeserializeInput{ + Request: in, + } + + res, metadata, err := h.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(DeserializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(DeserializeMiddleware), nil +} + +// Remove removes the middleware by id. 
Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+	Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	resp, metadata, err := w.Next.Handle(ctx, in.Request)
+	return DeserializeOutput{
+		RawResponse: resp,
+	}, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+	Next DeserializeHandler
+	With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+	return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 000000000..065e3885d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+	Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+	Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+	HandleFinalize(ctx context.Context, in FinalizeInput) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+	// ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleFinalize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+ HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID +// provided, and the func to be invoked. +func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { + return finalizeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type finalizeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, FinalizeInput, FinalizeHandler) ( + FinalizeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s finalizeMiddlewareFunc) ID() string { return s.id } + +// HandleFinalize invokes the middleware Fn. +func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) + +// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be +// invoked on a handler. +type FinalizeStep struct { + ids *orderedIDs +} + +// NewFinalizeStep returns a FinalizeStep ready to have middleware for +// initialization added to it. +func NewFinalizeStep() *FinalizeStep { + return &FinalizeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*FinalizeStep)(nil) + +// ID returns the unique id of the step as a middleware. +func (s *FinalizeStep) ID() string { + return "Finalize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h FinalizeHandler = finalizeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedFinalizeHandler{ + Next: h, + With: order[i].(FinalizeMiddleware), + } + } + + sIn := FinalizeInput{ + Request: in, + } + + res, metadata, err := h.HandleFinalize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(FinalizeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
+func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *FinalizeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *FinalizeStep) Clear() { + s.ids.Clear() +} + +type finalizeWrapHandler struct { + Next Handler +} + +var _ FinalizeHandler = (*finalizeWrapHandler)(nil) + +// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying +// generic handler. +func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return FinalizeOutput{ + Result: res, + }, metadata, err +} + +type decoratedFinalizeHandler struct { + Next FinalizeHandler + With FinalizeMiddleware +} + +var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) + +func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return h.With.HandleFinalize(ctx, in, h.Next) +} + +// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) + +// HandleFinalize invokes the wrapped function with the given arguments. +func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { + return f(ctx, in) +} + +var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go new file mode 100644 index 000000000..fe359144d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// InitializeInput wraps the input parameters for the InitializeMiddlewares to +// consume. InitializeMiddleware may modify the parameter value before +// forwarding it along to the next InitializeHandler. +type InitializeInput struct { + Parameters interface{} +} + +// InitializeOutput provides the result returned by the next InitializeHandler. +type InitializeOutput struct { + Result interface{} +} + +// InitializeHandler provides the interface for the next handler the +// InitializeMiddleware will call in the middleware chain. +type InitializeHandler interface { + HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddleware provides the interface for middleware specific to the +// initialize step. Delegates to the next InitializeHandler for further +// processing. +type InitializeMiddleware interface { + // ID returns a unique ID for the middleware in the InitializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleInitialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. 
The method must return a result or + // error to its caller. + HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, +// and the func to be invoked. +func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { + return initializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type initializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, InitializeInput, InitializeHandler) ( + InitializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s initializeMiddlewareFunc) ID() string { return s.id } + +// HandleInitialize invokes the middleware Fn. +func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ InitializeMiddleware = (initializeMiddlewareFunc{}) + +// InitializeStep provides the ordered grouping of InitializeMiddleware to be +// invoked on a handler. +type InitializeStep struct { + ids *orderedIDs +} + +// NewInitializeStep returns an InitializeStep ready to have middleware for +// initialization added to it. +func NewInitializeStep() *InitializeStep { + return &InitializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*InitializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *InitializeStep) ID() string { + return "Initialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h InitializeHandler = initializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedInitializeHandler{ + Next: h, + With: order[i].(InitializeMiddleware), + } + } + + sIn := InitializeInput{ + Parameters: in, + } + + res, metadata, err := h.HandleInitialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(InitializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *InitializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *InitializeStep) Clear() { + s.ids.Clear() +} + +type initializeWrapHandler struct { + Next Handler +} + +var _ InitializeHandler = (*initializeWrapHandler)(nil) + +// HandleInitialize implements InitializeHandler, converts types and delegates to underlying +// generic handler. +func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Parameters) + return InitializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedInitializeHandler struct { + Next InitializeHandler + With InitializeMiddleware +} + +var _ InitializeHandler = (*decoratedInitializeHandler)(nil) + +func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + return h.With.HandleInitialize(ctx, in, h.Next) +} + +// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. +type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) + +// HandleInitialize calls the wrapped function with the provided arguments. +func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { + return i(ctx, in) +} + +var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go new file mode 100644 index 000000000..114bafced --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go @@ -0,0 +1,219 @@ +package middleware + +import "context" + +// SerializeInput provides the input parameters for the SerializeMiddleware to +// consume. SerializeMiddleware may modify the Request value before forwarding +// SerializeInput along to the next SerializeHandler. The Parameters member +// should not be modified by SerializeMiddleware, InitializeMiddleware should +// be responsible for modifying the provided Parameter value. +type SerializeInput struct { + Parameters interface{} + Request interface{} +} + +// SerializeOutput provides the result returned by the next SerializeHandler. +type SerializeOutput struct { + Result interface{} +} + +// SerializeHandler provides the interface for the next handler the +// SerializeMiddleware will call in the middleware chain. +type SerializeHandler interface { + HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next SerializeHandler for further +// processing. 
+type SerializeMiddleware interface { + // ID returns a unique ID for the middleware in the SerializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleSerialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID +// provided, and the func to be invoked. +func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { + return serializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type serializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, SerializeInput, SerializeHandler) ( + SerializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s serializeMiddlewareFunc) ID() string { return s.id } + +// HandleSerialize invokes the middleware Fn. +func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ SerializeMiddleware = (serializeMiddlewareFunc{}) + +// SerializeStep provides the ordered grouping of SerializeMiddleware to be +// invoked on a handler. +type SerializeStep struct { + newRequest func() interface{} + ids *orderedIDs +} + +// NewSerializeStep returns a SerializeStep ready to have middleware for +// initialization added to it. The newRequest func parameter is used to +// initialize the transport specific request for the stack SerializeStep to +// serialize the input parameters into. +func NewSerializeStep(newRequest func() interface{}) *SerializeStep { + return &SerializeStep{ + ids: newOrderedIDs(), + newRequest: newRequest, + } +} + +var _ Middleware = (*SerializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *SerializeStep) ID() string { + return "Serialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h SerializeHandler = serializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedSerializeHandler{ + Next: h, + With: order[i].(SerializeMiddleware), + } + } + + sIn := SerializeInput{ + Parameters: in, + Request: s.newRequest(), + } + + res, metadata, err := h.HandleSerialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(SerializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. 
+func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *SerializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *SerializeStep) Clear() { + s.ids.Clear() +} + +type serializeWrapHandler struct { + Next Handler +} + +var _ SerializeHandler = (*serializeWrapHandler)(nil) + +// Implements SerializeHandler, converts types and delegates to underlying +// generic handler. +func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return SerializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedSerializeHandler struct { + Next SerializeHandler + With SerializeMiddleware +} + +var _ SerializeHandler = (*decoratedSerializeHandler)(nil) + +func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + return h.With.HandleSerialize(ctx, in, h.Next) +} + +// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) + +// HandleSerialize calls the wrapped function with the provided arguments. +func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return s(ctx, in) +} + +var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go new file mode 100644 index 000000000..bc1f69961 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/doc.go @@ -0,0 +1,5 @@ +// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. +package ptr + +//go:generate go run -tags codegen generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go new file mode 100644 index 000000000..a2845bb2c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go @@ -0,0 +1,601 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. 
Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + if p == nil { + return v + } + + return *p +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + ps := make([]bool, len(vs)) + for i, v := range vs { + ps[i] = ToBool(v) + } + + return ps +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + ps := make(map[string]bool, len(vs)) + for k, v := range vs { + ps[k] = ToBool(v) + } + + return ps +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + if p == nil { + return v + } + + return *p +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + ps := make([]byte, len(vs)) + for i, v := range vs { + ps[i] = ToByte(v) + } + + return ps +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + ps := make(map[string]byte, len(vs)) + for k, v := range vs { + ps[k] = ToByte(v) + } + + return ps +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + if p == nil { + return v + } + + return *p +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + ps := make([]string, len(vs)) + for i, v := range vs { + ps[i] = ToString(v) + } + + return ps +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + ps := make(map[string]string, len(vs)) + for k, v := range vs { + ps[k] = ToString(v) + } + + return ps +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + if p == nil { + return v + } + + return *p +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + ps := make([]int, len(vs)) + for i, v := range vs { + ps[i] = ToInt(v) + } + + return ps +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + ps := make(map[string]int, len(vs)) + for k, v := range vs { + ps[k] = ToInt(v) + } + + return ps +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. 
+func ToInt8(p *int8) (v int8) { + if p == nil { + return v + } + + return *p +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + ps := make([]int8, len(vs)) + for i, v := range vs { + ps[i] = ToInt8(v) + } + + return ps +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + ps := make(map[string]int8, len(vs)) + for k, v := range vs { + ps[k] = ToInt8(v) + } + + return ps +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + if p == nil { + return v + } + + return *p +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + ps := make([]int16, len(vs)) + for i, v := range vs { + ps[i] = ToInt16(v) + } + + return ps +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + ps := make(map[string]int16, len(vs)) + for k, v := range vs { + ps[k] = ToInt16(v) + } + + return ps +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + if p == nil { + return v + } + + return *p +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + ps := make([]int32, len(vs)) + for i, v := range vs { + ps[i] = ToInt32(v) + } + + return ps +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + ps := make(map[string]int32, len(vs)) + for k, v := range vs { + ps[k] = ToInt32(v) + } + + return ps +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + if p == nil { + return v + } + + return *p +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + ps := make([]int64, len(vs)) + for i, v := range vs { + ps[i] = ToInt64(v) + } + + return ps +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + ps := make(map[string]int64, len(vs)) + for k, v := range vs { + ps[k] = ToInt64(v) + } + + return ps +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. 
+func ToUint(p *uint) (v uint) { + if p == nil { + return v + } + + return *p +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + ps := make([]uint, len(vs)) + for i, v := range vs { + ps[i] = ToUint(v) + } + + return ps +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + ps := make(map[string]uint, len(vs)) + for k, v := range vs { + ps[k] = ToUint(v) + } + + return ps +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + if p == nil { + return v + } + + return *p +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + ps := make([]uint8, len(vs)) + for i, v := range vs { + ps[i] = ToUint8(v) + } + + return ps +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + ps := make(map[string]uint8, len(vs)) + for k, v := range vs { + ps[k] = ToUint8(v) + } + + return ps +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + if p == nil { + return v + } + + return *p +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + ps := make([]uint16, len(vs)) + for i, v := range vs { + ps[i] = ToUint16(v) + } + + return ps +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + ps := make(map[string]uint16, len(vs)) + for k, v := range vs { + ps[k] = ToUint16(v) + } + + return ps +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + if p == nil { + return v + } + + return *p +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + ps := make([]uint32, len(vs)) + for i, v := range vs { + ps[i] = ToUint32(v) + } + + return ps +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + ps := make(map[string]uint32, len(vs)) + for k, v := range vs { + ps[k] = ToUint32(v) + } + + return ps +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. 
+func ToUint64(p *uint64) (v uint64) { + if p == nil { + return v + } + + return *p +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + ps := make([]uint64, len(vs)) + for i, v := range vs { + ps[i] = ToUint64(v) + } + + return ps +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + ps := make(map[string]uint64, len(vs)) + for k, v := range vs { + ps[k] = ToUint64(v) + } + + return ps +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + if p == nil { + return v + } + + return *p +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + ps := make([]float32, len(vs)) + for i, v := range vs { + ps[i] = ToFloat32(v) + } + + return ps +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + ps := make(map[string]float32, len(vs)) + for k, v := range vs { + ps[k] = ToFloat32(v) + } + + return ps +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + if p == nil { + return v + } + + return *p +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + ps := make([]float64, len(vs)) + for i, v := range vs { + ps[i] = ToFloat64(v) + } + + return ps +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + ps := make(map[string]float64, len(vs)) + for k, v := range vs { + ps[k] = ToFloat64(v) + } + + return ps +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + ps := make([]time.Time, len(vs)) + for i, v := range vs { + ps[i] = ToTime(v) + } + + return ps +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. 
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + ps := make(map[string]time.Time, len(vs)) + for k, v := range vs { + ps[k] = ToTime(v) + } + + return ps +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + ps := make([]time.Duration, len(vs)) + for i, v := range vs { + ps[i] = ToDuration(v) + } + + return ps +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + ps := make(map[string]time.Duration, len(vs)) + for k, v := range vs { + ps[k] = ToDuration(v) + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go new file mode 100644 index 000000000..97f01011e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go @@ -0,0 +1,83 @@ +//go:build codegen +// +build codegen + +package ptr + +import "strings" + +func GetScalars() Scalars { + return Scalars{ + {Type: "bool"}, + {Type: "byte"}, + {Type: "string"}, + {Type: "int"}, + {Type: "int8"}, + {Type: "int16"}, + {Type: "int32"}, + {Type: "int64"}, + {Type: "uint"}, + {Type: "uint8"}, + {Type: "uint16"}, + {Type: "uint32"}, + {Type: "uint64"}, + {Type: "float32"}, + {Type: "float64"}, + {Type: "Time", Import: &Import{Path: "time"}}, + {Type: "Duration", Import: &Import{Path: "time"}}, + } +} + +// Import provides the import path and optional alias +type Import struct { + Path string + Alias string +} + +// Package returns the Go package name for the import. Returns alias if set. +func (i Import) Package() string { + if v := i.Alias; len(v) != 0 { + return v + } + + if v := i.Path; len(v) != 0 { + parts := strings.Split(v, "/") + pkg := parts[len(parts)-1] + return pkg + } + + return "" +} + +// Scalar provides the definition of a type to generate pointer utilities for. +type Scalar struct { + Type string + Import *Import +} + +// Name returns the exported function name for the type. +func (t Scalar) Name() string { + return strings.Title(t.Type) +} + +// Symbol returns the scalar's Go symbol with path if needed. +func (t Scalar) Symbol() string { + if t.Import != nil { + return t.Import.Package() + "." + t.Type + } + return t.Type +} + +// Scalars is a list of scalars. +type Scalars []Scalar + +// Imports returns all imports for the scalars. +func (ts Scalars) Imports() []*Import { + imports := []*Import{} + for _, t := range ts { + if v := t.Import; v != nil { + imports = append(imports, v) + } + } + + return imports +} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go new file mode 100644 index 000000000..0bfbbecbd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go @@ -0,0 +1,499 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// Bool returns a pointer value for the bool value passed in. 
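+//
+// Illustrative usage (sketch):
+//
+//	enabled := Bool(true) // *bool pointing at a copy of true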
+func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. 
+func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. 
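+//
+// Note: the vv := v copy in these generated loops is deliberate; taking &v
+// directly would alias the loop variable under pre-Go 1.22 semantics, leaving
+// every returned pointer sharing one address.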
+func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + ps := make([]*time.Time, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + ps := make(map[string]*time.Time, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return &v +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + ps := make([]*time.Duration, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + ps := make(map[string]*time.Duration, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go new file mode 100644 index 000000000..f8b25d562 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/doc.go @@ -0,0 +1,3 @@ +// Package rand provides utilities for creating and working with random value +// generators. 
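+//
+// Illustrative usage (a sketch; Reader defaults to crypto/rand.Reader):
+//
+//	n, err := rand.CryptoRandInt63n(100) // uniform int64 in [0, 100)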
+package rand
diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go
new file mode 100644
index 000000000..9c479f62b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/rand.go
@@ -0,0 +1,31 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can be reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+	bi, err := rand.Int(reader, big.NewInt(max))
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %w", err)
+	}
+
+	return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and the value of max
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+	return Int63n(Reader, max)
+}
diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go
new file mode 100644
index 000000000..dc81cbc68
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/uuid.go
@@ -0,0 +1,87 @@
+package rand
+
+import (
+	"encoding/hex"
+	"io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+	uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider that returns
+// tokens in the random UUID format, using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+	return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for use as an idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+	return u.uuid.GetUUID()
+}
+
+// UUID computes random UUID version 4 values from a random source reader.
+type UUID struct {
+	randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID version 4 values.
+func NewUUID(r io.Reader) *UUID {
+	return &UUID{randSrc: r}
+}
+
+// GetUUID returns a random UUID version 4 string representation sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+	var b [16]byte
+	if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+		return "", err
+	}
+	r.makeUUIDv4(b[:])
+	return format(b), nil
+}
+
+// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetBytes() (u []byte, err error) {
+	u = make([]byte, 16)
+	if _, err = io.ReadFull(r.randSrc, u); err != nil {
+		return u, err
+	}
+	r.makeUUIDv4(u)
+	return u, nil
+}
+
+func (r *UUID) makeUUIDv4(u []byte) {
+	// 13th character is "4"
+	u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0
+}
+
+// Format returns the canonical text representation of a UUID.
+// This implementation is optimized to not use fmt.
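+// The version and variant bits are assumed to have been set already by
+// makeUUIDv4 before the bytes reach this function.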
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+	var scratch [36]byte
+
+	hex.Encode(scratch[:8], u[0:4])
+	scratch[8] = dash
+	hex.Encode(scratch[9:13], u[4:6])
+	scratch[13] = dash
+	hex.Encode(scratch[14:18], u[6:8])
+	scratch[18] = dash
+	hex.Encode(scratch[19:23], u[8:10])
+	scratch[23] = dash
+	hex.Encode(scratch[24:], u[10:])
+
+	return string(scratch[:])
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 000000000..b552a09f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"strings"
+	"time"
+)
+
+const (
+	// dateTimeFormat* implement the date-time format of RFC 3339 section 5.6.
+	dateTimeFormatInput    = "2006-01-02T15:04:05.999999999Z"
+	dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+	dateTimeFormatOutput   = "2006-01-02T15:04:05.999Z"
+
+	// httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+	// IMF-fixdate with no UTC offset.
+	httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+	// Additional formats needed for compatibility.
+	httpDateFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+	return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+	return tryParse(value,
+		dateTimeFormatInput,
+		dateTimeFormatInputNoZ,
+		time.RFC3339Nano,
+		time.RFC3339,
+	)
+}
+
+// FormatHTTPDate formats value as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+	return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+	return tryParse(value,
+		httpDateFormat,
+		httpDateFormatSingleDigitDay,
+		httpDateFormatSingleDigitDayTwoDigitYear,
+		time.RFC850,
+		time.ANSIC,
+	)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision.
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+	ms := value.UnixNano() / int64(time.Millisecond)
+	return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value parsed as a Unix time in seconds with decimal precision.
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+	f := big.NewFloat(value)
+	f = f.Mul(f, millisecondFloat)
+	i, _ := f.Int64()
+	// Offset to `UTC` because time.Unix returns the time value based on system
+	// local setting.
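+	// i holds milliseconds; i*1e6 converts them to nanoseconds for time.Unix.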
+ return time.Unix(0, i*1e6).UTC() +} + +func tryParse(v string, formats ...string) (time.Time, error) { + var errs parseErrors + for _, f := range formats { + t, err := time.Parse(f, v) + if err != nil { + errs = append(errs, parseError{ + Format: f, + Err: err, + }) + continue + } + return t, nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs) +} + +type parseErrors []parseError + +func (es parseErrors) Error() string { + var s strings.Builder + for _, e := range es { + fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) + } + + return "parse errors:" + s.String() +} + +type parseError struct { + Format string + Err error +} + +// SleepWithContext will wait for the timer duration to expire, or until the context +// is canceled. Whichever happens first. If the context is canceled the +// Context's error will be returned. +func SleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go new file mode 100644 index 000000000..bc4ad6e79 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go @@ -0,0 +1,70 @@ +package http + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" +) + +const contentMD5Header = "Content-Md5" + +// contentMD5Checksum provides a middleware to compute and set +// content-md5 checksum for a http request +type contentMD5Checksum struct { +} + +// AddContentChecksumMiddleware adds checksum middleware to middleware's +// build step. +func AddContentChecksumMiddleware(stack *middleware.Stack) error { + // This middleware must be executed before request body is set. + return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) +} + +// ID returns the identifier for the checksum middleware +func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } + +// HandleBuild adds behavior to compute md5 checksum and add content-md5 header +// on http request +func (m *contentMD5Checksum) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // if Content-MD5 header is already present, return + if v := req.Header.Get(contentMD5Header); len(v) != 0 { + return next.HandleBuild(ctx, in) + } + + // fetch the request stream. 
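+	// A nil stream means the operation has no payload to hash, so the
+	// checksum computation below is skipped.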
+ stream := req.GetStream() + // compute checksum if payload is explicit + if stream != nil { + if !req.IsStreamSeekable() { + return out, metadata, fmt.Errorf( + "unseekable stream is not supported for computing md5 checksum") + } + + v, err := computeMD5Checksum(stream) + if err != nil { + return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) + } + + // reset the request stream + if err := req.RewindStream(); err != nil { + return out, metadata, fmt.Errorf( + "error rewinding request stream after computing md5 checksum, %w", err) + } + + // set the 'Content-MD5' header + req.Header.Set(contentMD5Header, string(v)) + } + + // set md5 header value + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go new file mode 100644 index 000000000..e691c69bf --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/client.go @@ -0,0 +1,120 @@ +package http + +import ( + "context" + "fmt" + "net/http" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +// ClientDo provides the interface for custom HTTP client implementations. +type ClientDo interface { + Do(*http.Request) (*http.Response, error) +} + +// ClientDoFunc provides a helper to wrap a function as an HTTP client for +// round tripping requests. +type ClientDoFunc func(*http.Request) (*http.Response, error) + +// Do will invoke the underlying func, returning the result. +func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { + return fn(r) +} + +// ClientHandler wraps a client that implements the HTTP Do method. Standard +// implementation is http.Client. +type ClientHandler struct { + client ClientDo +} + +// NewClientHandler returns an initialized middleware handler for the client. +func NewClientHandler(client ClientDo) ClientHandler { + return ClientHandler{ + client: client, + } +} + +// Handle implements the middleware Handler interface, that will invoke the +// underlying HTTP client. Requires the input to be a Smithy *Request. Returns +// a smithy *Response, or error if the request failed. +func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( + out interface{}, metadata middleware.Metadata, err error, +) { + req, ok := input.(*Request) + if !ok { + return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) + } + + builtRequest := req.Build(ctx) + if err := ValidateEndpointHost(builtRequest.Host); err != nil { + return nil, metadata, err + } + + resp, err := c.client.Do(builtRequest) + if resp == nil { + // Ensure a http response value is always present to prevent unexpected + // panics. + resp = &http.Response{ + Header: http.Header{}, + Body: http.NoBody, + } + } + if err != nil { + err = &RequestSendError{Err: err} + + // Override the error with a context canceled error, if that was canceled. + select { + case <-ctx.Done(): + err = &smithy.CanceledError{Err: ctx.Err()} + default: + } + } + + // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner. + // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the + // stream reference so that it can be safely reused. + if builtRequest.Body != nil { + _ = builtRequest.Body.Close() + } + + return &Response{Response: resp}, metadata, err +} + +// RequestSendError provides a generic request transport error. 
This error +// should wrap errors making HTTP client requests. +// +// The ClientHandler will wrap the HTTP client's error if the client request +// fails, and did not fail because of context canceled. +type RequestSendError struct { + Err error +} + +// ConnectionError returns that the error is related to not being able to send +// the request, or receive a response from the service. +func (e *RequestSendError) ConnectionError() bool { + return true +} + +// Unwrap returns the underlying error, if there was one. +func (e *RequestSendError) Unwrap() error { + return e.Err +} + +func (e *RequestSendError) Error() string { + return fmt.Sprintf("request send failed, %v", e.Err) +} + +// NopClient provides a client that ignores the request, and returns an empty +// successful HTTP response value. +type NopClient struct{} + +// Do ignores the request and returns a 200 status empty response. +func (NopClient) Do(r *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: http.NoBody, + }, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go new file mode 100644 index 000000000..07366ac85 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go @@ -0,0 +1,5 @@ +/* +Package http provides the HTTP transport client and request/response types +needed to round trip API operation calls with an service. +*/ +package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go new file mode 100644 index 000000000..cbc9deb4d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go @@ -0,0 +1,163 @@ +package http + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { + values := make([]string, 0, len(vs)) + + for i := 0; i < len(vs); i++ { + parts, err := splitFn(vs[i]) + if err != nil { + return nil, err + } + values = append(values, parts...) + } + + return values, nil +} + +// SplitHeaderListValues attempts to split the elements of the slice by commas, +// and return a list of all values separated. Returns error if unable to +// separate the values. +func SplitHeaderListValues(vs []string) ([]string, error) { + return splitHeaderListValues(vs, quotedCommaSplit) +} + +func quotedCommaSplit(v string) (parts []string, err error) { + v = strings.TrimSpace(v) + + expectMore := true + for i := 0; i < len(v); i++ { + if unicode.IsSpace(rune(v[i])) { + continue + } + expectMore = false + + // leading space in part is ignored. + // Start of value must be non-space, or quote. + // + // - If quote, enter quoted mode, find next non-escaped quote to + // terminate the value. + // - Otherwise, find next comma to terminate value. 
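+		//
+		// For example (sketch): the input `a, "b,c", d` splits into the three
+		// parts "a", "b,c" (unquoted), and "d"; the embedded comma is preserved.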
+ + remaining := v[i:] + + var value string + var valueLen int + if remaining[0] == '"' { + //------------------------------ + // Quoted value + //------------------------------ + var j int + var skipQuote bool + for j += 1; j < len(remaining); j++ { + if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { + skipQuote = !skipQuote + continue + } + if remaining[j] == '"' { + break + } + } + if j == len(remaining) || j == 1 { + return nil, fmt.Errorf("value %v missing closing double quote", + remaining) + } + valueLen = j + 1 + + tail := remaining[valueLen:] + var k int + for ; k < len(tail); k++ { + if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { + return nil, fmt.Errorf("value %v has non-space trailing characters", + remaining) + } + if tail[k] == ',' { + expectMore = true + break + } + } + value = remaining[:valueLen] + value, err = strconv.Unquote(value) + if err != nil { + return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) + } + + // Pad valueLen to include trailing space(s) so `i` is updated correctly. + valueLen += k + + } else { + //------------------------------ + // Unquoted value + //------------------------------ + + // Index of the next comma is the length of the value, or end of string. + valueLen = strings.Index(remaining, ",") + if valueLen != -1 { + expectMore = true + } else { + valueLen = len(remaining) + } + value = strings.TrimSpace(remaining[:valueLen]) + } + + i += valueLen + parts = append(parts, value) + + } + + if expectMore { + parts = append(parts, "") + } + + return parts, nil +} + +// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date +// timestamp values in the slice by commas, and return a list of all values +// separated. The split is aware of the HTTP-Date timestamp format, and will skip +// comma within the timestamp value. Returns an error if unable to split the +// timestamp values. +func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) { + return splitHeaderListValues(vs, splitHTTPDateHeaderValue) +} + +func splitHTTPDateHeaderValue(v string) ([]string, error) { + if n := strings.Count(v, ","); n <= 1 { + // Nothing to do if only contains a no, or single HTTPDate value + return []string{v}, nil + } else if n%2 == 0 { + return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v) + } + + var parts []string + var i, j int + + var doSplit bool + for ; i < len(v); i++ { + if v[i] == ',' { + if doSplit { + doSplit = false + parts = append(parts, strings.TrimSpace(v[j:i])) + j = i + 1 + } else { + // Skip the first comma in the timestamp value since that + // separates the day from the rest of the timestamp. + // + // Tue, 17 Dec 2019 23:48:18 GMT + doSplit = true + } + } + } + // Add final part + if j < len(v) { + parts = append(parts, strings.TrimSpace(v[j:])) + } + + return parts, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go new file mode 100644 index 000000000..6b290fec0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/host.go @@ -0,0 +1,89 @@ +package http + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. 
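+//
+// For example (sketch): "example.com:443" and "localhost" validate, while
+// "bad_host" fails the label check and "example.com:99999" fails the port
+// check.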
+func ValidateEndpointHost(host string) error {
+	var errors strings.Builder
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+		if err != nil {
+			errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+			errors.WriteString(err.Error())
+		}
+
+		if !ValidPortNumber(port) {
+			errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+			errors.WriteString(label)
+		}
+	}
+
+	if len(hostname) == 0 && len(port) != 0 {
+		errors.WriteString("\nendpoint host with port must not be empty")
+	}
+
+	if len(hostname) > 255 {
+		errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+	}
+
+	if len(errors.String()) > 0 {
+		return fmt.Errorf("invalid endpoint host%s", errors.String())
+	}
+	return nil
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 000000000..941a8d6b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+	"io"
+	"sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+	sr := &safeReadCloser{
+		readCloser: readCloser,
+	}
+
+	if _, ok := readCloser.(io.WriterTo); ok {
+		return &safeWriteToReadCloser{safeReadCloser: sr}
+	}
+
+	return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+	*safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+	r.safeReadCloser.mtx.Lock()
+	defer r.safeReadCloser.mtx.Unlock()
+
+	if r.safeReadCloser.closed {
+		return 0, io.EOF
+	}
+
+	return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection.
Wrapping in this manner will ensure that no data race conditions are falsely reported. +// This type is thread-safe. +type safeReadCloser struct { + readCloser io.ReadCloser + closed bool + mtx sync.Mutex +} + +// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned. +func (r *safeReadCloser) Read(p []byte) (n int, err error) { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.closed { + return 0, io.EOF + } + + return r.readCloser.Read(p) +} + +// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error +// reported from Close. Subsequent calls to Close will always return a nil error. +func (r *safeReadCloser) Close() error { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.closed { + return nil + } + + r.closed = true + rc := r.readCloser + r.readCloser = nil + return rc.Close() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go new file mode 100644 index 000000000..5d6a4b23a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go @@ -0,0 +1,25 @@ +package http + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + "io" +) + +// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents. +// Returns the byte slice of md5 checksum and an error. +func computeMD5Checksum(r io.Reader) ([]byte, error) { + h := md5.New() + // copy errors may be assumed to be from the body. + _, err := io.Copy(h, r) + if err != nil { + return nil, fmt.Errorf("failed to read body: %w", err) + } + + // encode the md5 checksum in base64. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + return sum64, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go new file mode 100644 index 000000000..1d3b218a1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + "io" + "io/ioutil" +) + +// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically +// close the response body of an operation request if the request response +// failed. +func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) +} + +type errorCloseResponseBodyMiddleware struct{} + +func (*errorCloseResponseBodyMiddleware) ID() string { + return "ErrorCloseResponseBody" +} + +func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err := next.HandleDeserialize(ctx, input) + if err != nil { + if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { + // Consume the full body to prevent TCP connection resets on some platforms + _, _ = io.Copy(ioutil.Discard, resp.Body) + // Do not validate that the response closes successfully. 
+ resp.Body.Close() + } + } + + return out, metadata, err +} + +// AddCloseResponseBodyMiddleware adds the middleware to automatically close +// the response body of an operation request, after the response had been +// deserialized. +func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before) +} + +type closeResponseBody struct{} + +func (*closeResponseBody) ID() string { + return "CloseResponseBody" +} + +func (m *closeResponseBody) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err := next.HandleDeserialize(ctx, input) + if err != nil { + return out, metadata, err + } + + if resp, ok := out.RawResponse.(*Response); ok { + // Consume the full body to prevent TCP connection resets on some platforms + _, copyErr := io.Copy(ioutil.Discard, resp.Body) + if copyErr != nil { + middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") + } + + closeErr := resp.Body.Close() + if closeErr != nil { + middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse") + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go new file mode 100644 index 000000000..9969389bb --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go @@ -0,0 +1,84 @@ +package http + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" +) + +// ComputeContentLength provides a middleware to set the content-length +// header for the length of a serialize request body. +type ComputeContentLength struct { +} + +// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware +// stack's Build step. +func AddComputeContentLengthMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&ComputeContentLength{}, middleware.After) +} + +// ID returns the identifier for the ComputeContentLength. +func (m *ComputeContentLength) ID() string { return "ComputeContentLength" } + +// HandleBuild adds the length of the serialized request to the HTTP header +// if the length can be determined. +func (m *ComputeContentLength) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // do nothing if request content-length was set to 0 or above. + if req.ContentLength >= 0 { + return next.HandleBuild(ctx, in) + } + + // attempt to compute stream length + if n, ok, err := req.StreamLength(); err != nil { + return out, metadata, fmt.Errorf( + "failed getting length of request stream, %w", err) + } else if ok { + req.ContentLength = n + } + + return next.HandleBuild(ctx, in) +} + +// validateContentLength provides a middleware to validate the content-length +// is valid (greater than zero), for the serialized request payload. +type validateContentLength struct{} + +// ValidateContentLengthHeader adds middleware that validates request content-length +// is set to value greater than zero. 
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the serialized request's content-length, if set,
+// is not negative.
+func (m *validateContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if request content-length was set to less than 0, return an error
+	if req.ContentLength < 0 {
+		return out, metadata, fmt.Errorf(
+			"content length for payload is required and must be at least 0")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 000000000..49884e6af
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,88 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type headerValue struct {
+	header string
+	value  string
+	append bool
+}
+
+type headerValueHelper struct {
+	headerValues []headerValue
+}
+
+func (h *headerValueHelper) addHeaderValue(value headerValue) {
+	h.headerValues = append(h.headerValues, value)
+}
+
+func (h *headerValueHelper) ID() string {
+	return "HTTPHeaderHelper"
+}
+
+func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	for _, value := range h.headerValues {
+		if value.append {
+			req.Header.Add(value.header, value.value)
+		} else {
+			req.Header.Set(value.header, value.value)
+		}
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) {
+	id := (*headerValueHelper)(nil).ID()
+	m, ok := stack.Build.Get(id)
+	if !ok {
+		m = &headerValueHelper{}
+		err := stack.Build.Add(m, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := m.(*headerValueHelper)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddHeaderValue returns a stack mutator that adds the header value pair to header.
+// Appends to any existing values if present.
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+		return nil
+	}
+}
+
+// SetHeaderValue returns a stack mutator that adds the header value pair to header.
+// Replaces any existing values if present.
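+//
+// Illustrative usage (sketch): applying SetHeaderValue("X-Log-Level", "debug")
+// to a stack pins that header to a single value on every request it builds.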
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: false}) + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go new file mode 100644 index 000000000..d5909b0a2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go @@ -0,0 +1,75 @@ +package http + +import ( + "context" + "fmt" + "net/http/httputil" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally +// their respective bodies. Will not perform any logging if none of the options are set. +type RequestResponseLogger struct { + LogRequest bool + LogRequestWithBody bool + + LogResponse bool + LogResponseWithBody bool +} + +// ID is the middleware identifier. +func (r *RequestResponseLogger) ID() string { + return "RequestResponseLogger" +} + +// HandleDeserialize will log the request and response HTTP messages if configured accordingly. +func (r *RequestResponseLogger) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + if r.LogRequest || r.LogRequestWithBody { + smithyRequest, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + rc := smithyRequest.Build(ctx) + reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) + if err != nil { + return out, metadata, err + } + + logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) + + if r.LogRequestWithBody { + smithyRequest, err = smithyRequest.SetStream(rc.Body) + if err != nil { + return out, metadata, err + } + in.Request = smithyRequest + } + } + + out, metadata, err = next.HandleDeserialize(ctx, in) + + if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { + smithyResponse, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) + if err != nil { + return out, metadata, fmt.Errorf("failed to dump response %w", err) + } + + logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go new file mode 100644 index 000000000..d6079b259 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type ( + hostnameImmutableKey struct{} + hostPrefixDisableKey struct{} +) + +// GetHostnameImmutable retrieves whether the endpoint hostname should be considered +// immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. 
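+//
+// For example (sketch): an endpoint resolver may call
+// SetHostnameImmutable(ctx, true) so later middleware leaves the resolved
+// hostname untouched; GetHostnameImmutable reads that flag back.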
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+	return v
+}
+
+// SetHostnameImmutable sets or modifies whether the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+	return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host
+// prefixing should be disabled. If value is true, endpoint host prefixing
+// will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 000000000..326cb8a6c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+	proto              string
+	expectedProtoMajor int
+	expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+	return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+		m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+	ProtoMajor int
+	ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+	return stack.Deserialize.Insert(&RequireMinimumProtocol{
+		ProtoMajor: major,
+		ProtoMinor: minor,
+	}, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+	return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
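+//
+// For example (sketch): with ProtoMajor 2 and ProtoMinor 0, a response that
+// arrived over HTTP/1.1 is rejected with a *MinimumProtocolError.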
+func (r *RequireMinimumProtocol) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + + if !strings.HasPrefix(response.Proto, "HTTP") { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go new file mode 100644 index 000000000..ffac684f4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -0,0 +1,180 @@ +package http + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + iointernal "github.com/aws/smithy-go/transport/http/internal/io" +) + +// Request provides the HTTP specific request structure for HTTP specific +// middleware steps to use to serialize input, and send an operation's request. +type Request struct { + *http.Request + stream io.Reader + isStreamSeekable bool + streamStartPos int64 +} + +// NewStackRequest returns an initialized request ready to be populated with the +// HTTP request details. Returns empty interface so the function can be used as +// a parameter to the Smithy middleware Stack constructor. +func NewStackRequest() interface{} { + return &Request{ + Request: &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + ContentLength: -1, // default to unknown length + }, + } +} + +// Clone returns a deep copy of the Request for the new context. A reference to +// the Stream is copied, but the underlying stream is not copied. +func (r *Request) Clone() *Request { + rc := *r + rc.Request = rc.Request.Clone(context.TODO()) + return &rc +} + +// StreamLength returns the number of bytes of the serialized stream attached +// to the request and ok set. If the length cannot be determined, an error will +// be returned. +func (r *Request) StreamLength() (size int64, ok bool, err error) { + return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) +} + +func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { + if stream == nil { + return 0, true, nil + } + + if l, ok := stream.(interface{ Len() int }); ok { + return int64(l.Len()), true, nil + } + + if !seekable { + return 0, false, nil + } + + s := stream.(io.Seeker) + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, false, err + } + + // The reason to seek to streamStartPos instead of 0 is to ensure that the + // SDK only sends the stream from the starting position the user's + // application provided it to the SDK at. For example application opens a + // file, and wants to skip the first N bytes uploading the rest. The + // application would move the file's offset N bytes, then hand it off to + // the SDK to send the remaining. The SDK should respect that initial offset. 
+ _, err = s.Seek(startPos, io.SeekStart) + if err != nil { + return 0, false, err + } + + return endOffset - startPos, true, nil +} + +// RewindStream will rewind the io.Reader to the relative start position if it +// is an io.Seeker. +func (r *Request) RewindStream() error { + // If there is no stream there is nothing to rewind. + if r.stream == nil { + return nil + } + + if !r.isStreamSeekable { + return fmt.Errorf("request stream is not seekable") + } + _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) + return err +} + +// GetStream returns the request stream io.Reader if a stream is set. If no +// stream is present nil will be returned. +func (r *Request) GetStream() io.Reader { + return r.stream +} + +// IsStreamSeekable returns whether the stream is seekable. +func (r *Request) IsStreamSeekable() bool { + return r.isStreamSeekable +} + +// SetStream returns a clone of the request with the stream set to the provided +// reader. May return an error if the provided reader is seekable but returns +// an error. +func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { + rc = r.Clone() + + if reader == http.NoBody { + reader = nil + } + + var isStreamSeekable bool + var streamStartPos int64 + switch v := reader.(type) { + case io.Seeker: + n, err := v.Seek(0, io.SeekCurrent) + if err != nil { + return r, err + } + isStreamSeekable = true + streamStartPos = n + default: + // If the stream length can be determined, and is determined to be empty, + // use a nil stream to prevent confusion between empty vs not-empty + // streams. + length, ok, err := streamLength(reader, false, 0) + if err != nil { + return nil, err + } else if ok && length == 0 { + reader = nil + } + } + + rc.stream = reader + rc.isStreamSeekable = isStreamSeekable + rc.streamStartPos = streamStartPos + + return rc, err +} + +// Build returns a build standard HTTP request value from the Smithy request. +// The request's stream is wrapped in a safe container that allows it to be +// reused for subsequent attempts. +func (r *Request) Build(ctx context.Context) *http.Request { + req := r.Request.Clone(ctx) + + if r.stream == nil && req.ContentLength == -1 { + req.ContentLength = 0 + } + + switch stream := r.stream.(type) { + case *io.PipeReader: + req.Body = ioutil.NopCloser(stream) + req.ContentLength = -1 + default: + // HTTP Client Request must only have a non-nil body if the + // ContentLength is explicitly unknown (-1) or non-zero. The HTTP + // Client will interpret a non-nil body and ContentLength 0 as + // "unknown". This is unwanted behavior. + if req.ContentLength != 0 && r.stream != nil { + req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) + } + } + + return req +} + +// RequestCloner is a function that can take an input request type and clone the request +// for use in a subsequent retry attempt. +func RequestCloner(v interface{}) interface{} { + return v.(*Request).Clone() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go new file mode 100644 index 000000000..0c13bfcc8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/response.go @@ -0,0 +1,34 @@ +package http + +import ( + "fmt" + "net/http" +) + +// Response provides the HTTP specific response structure for HTTP specific +// middleware steps to use to deserialize the response from an operation call. 
+type Response struct { + *http.Response +} + +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value. +type ResponseError struct { + Response *Response + Err error +} + +// HTTPStatusCode returns the HTTP response status code received from the service. +func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } + +// HTTPResponse returns the HTTP response received from the service. +func (e *ResponseError) HTTPResponse() *Response { return e.Response } + +// Unwrap returns the nested error if any, or nil. +func (e *ResponseError) Unwrap() error { return e.Err } + +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "http response error StatusCode: %d, %v", + e.Response.StatusCode, e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go new file mode 100644 index 000000000..607b196a8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/time.go @@ -0,0 +1,13 @@ +package http + +import ( + "time" + + smithytime "github.com/aws/smithy-go/time" +) + +// ParseTime parses a time string like the HTTP Date header. This uses a more +// relaxed rule set for date parsing compared to the standard library. +func ParseTime(text string) (t time.Time, err error) { + return smithytime.ParseHTTPDate(text) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go new file mode 100644 index 000000000..60a5fc100 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/url.go @@ -0,0 +1,44 @@ +package http + +import "strings" + +// JoinPath returns an absolute URL path composed of the two paths provided. +// Enforces that the returned path begins with '/'. If added path is empty the +// returned path suffix will match the first parameter suffix. +func JoinPath(a, b string) string { + if len(a) == 0 { + a = "/" + } else if a[0] != '/' { + a = "/" + a + } + + if len(b) != 0 && b[0] == '/' { + b = b[1:] + } + + if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { + a = a + "/" + } + + return a + b +} + +// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' +// will be collapsed to single separator between values. +func JoinRawQuery(a, b string) string { + a = strings.TrimFunc(a, isAmpersand) + b = strings.TrimFunc(b, isAmpersand) + + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + return a + "&" + b +} + +func isAmpersand(v rune) bool { + return v == '&' +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go new file mode 100644 index 000000000..71a7e0d8a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go @@ -0,0 +1,37 @@ +package http + +import ( + "strings" +) + +// UserAgentBuilder is a builder for a HTTP User-Agent string. +type UserAgentBuilder struct { + sb strings.Builder +} + +// NewUserAgentBuilder returns a new UserAgentBuilder. +func NewUserAgentBuilder() *UserAgentBuilder { + return &UserAgentBuilder{sb: strings.Builder{}} +} + +// AddKey adds the named component/product to the agent string +func (u *UserAgentBuilder) AddKey(key string) { + u.appendTo(key) +} + +// AddKeyValue adds the named key to the agent string with the given value. +func (u *UserAgentBuilder) AddKeyValue(key, value string) { + u.appendTo(key + "/" + value) +} + +// Build returns the constructed User-Agent string. 
May be called multiple times. +func (u *UserAgentBuilder) Build() string { + return u.sb.String() +} + +func (u *UserAgentBuilder) appendTo(value string) { + if u.sb.Len() > 0 { + u.sb.WriteRune(' ') + } + u.sb.WriteString(value) +} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go new file mode 100644 index 000000000..b5eedc1f9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/validation.go @@ -0,0 +1,140 @@ +package smithy + +import ( + "bytes" + "fmt" + "strings" +) + +// An InvalidParamsError provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type InvalidParamsError struct { + // Context is the base context of the invalid parameter group. + Context string + errs []InvalidParamError +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *InvalidParamsError) Add(err InvalidParamError) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another InvalidParamsError +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e *InvalidParamsError) Len() int { + return len(e.errs) +} + +// Error returns the string formatted form of the invalid parameters. +func (e InvalidParamsError) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Error()) + } + + return w.String() +} + +// Errs returns a slice of the invalid parameters +func (e InvalidParamsError) Errs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An InvalidParamError represents an invalid parameter error type. +type InvalidParamError interface { + error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type invalidParamError struct { + context string + nestedContext string + field string + reason string +} + +// Error returns the string version of the invalid parameter error. +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +// Field Returns the field and context the error occurred. +func (e invalidParamError) Field() string { + sb := &strings.Builder{} + sb.WriteString(e.context) + if sb.Len() > 0 { + if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { + sb.WriteRune('.') + } + } + if len(e.nestedContext) > 0 { + sb.WriteString(e.nestedContext) + sb.WriteRune('.') + } + sb.WriteString(e.field) + return sb.String() +} + +// SetContext updates the base context of the error. +func (e *invalidParamError) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. 
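To see how `InvalidParamsError` is meant to be consumed, here is a hand-written sketch of the kind of validator the AWS SDK code generator emits. The shape name `PutObjectInput` and the field names are illustrative only; `NewErrParamRequired` is the constructor defined just below.

```go
import smithy "github.com/aws/smithy-go"

func validateInput(bucket, key string) error {
	errs := smithy.InvalidParamsError{Context: "PutObjectInput"}
	if bucket == "" {
		errs.Add(smithy.NewErrParamRequired("Bucket"))
	}
	if key == "" {
		errs.Add(smithy.NewErrParamRequired("Key"))
	}
	if errs.Len() > 0 {
		// Error() renders something like:
		//   2 validation error(s) found.
		//   - missing required field, PutObjectInput.Bucket.
		//   - missing required field, PutObjectInput.Key.
		return errs
	}
	return nil
}
```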
+func (e *invalidParamError) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + return + } + // Check if our nested context is an index into a slice or map + if e.nestedContext[:1] != "[" { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + return + } + e.nestedContext = ctx + e.nestedContext +} + +// An ParamRequiredError represents an required parameter error. +type ParamRequiredError struct { + invalidParamError +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ParamRequiredError { + return &ParamRequiredError{ + invalidParamError{ + field: field, + reason: fmt.Sprintf("missing required field"), + }, + } +} diff --git a/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.go b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.go new file mode 100644 index 000000000..4aef84787 --- /dev/null +++ b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.go @@ -0,0 +1,74 @@ +/* +Copyright 2022 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nfs + +import ( + "bytes" + _ "embed" + "fmt" + "text/template" + + "github.com/ghodss/yaml" + storagev1 "k8s.io/api/storage/v1" +) + +//go:embed csidriver.yaml +var csiDriver string + +type CSIDriverValues struct { + Name string +} + +var CSIDriverDefaults = CSIDriverValues{ + Name: "nfs.csi.ceph.com", +} + +// NewCSIDriver takes a driver name from the CSIDriverValues struct and +// replaces the value in the template. A CSIDriver object is returned which can +// be created in the Kubernetes cluster. +func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) { + data, err := NewCSIDriverYAML(values) + if err != nil { + return nil, err + } + + driver := &storagev1.CSIDriver{} + err = yaml.Unmarshal([]byte(data), driver) + if err != nil { + return nil, fmt.Errorf("failed convert YAML to %T: %w", driver, err) + } + + return driver, nil +} + +// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and +// replaces the value in the template. A CSIDriver object in YAML is returned +// which can be created in the Kubernetes cluster. 
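For orientation, this is roughly how a consumer of the new `api/deploy/kubernetes/nfs` package would drive the template helper whose body follows; the overridden driver name is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs"
)

func main() {
	// Start from the package defaults and override only what differs.
	values := nfs.CSIDriverDefaults
	values.Name = "nfs.csi.example.com" // hypothetical driver name

	yml, err := nfs.NewCSIDriverYAML(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(yml) // rendered CSIDriver manifest, ready for kubectl apply
}
```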
+func NewCSIDriverYAML(values CSIDriverValues) (string, error) { + var buf bytes.Buffer + + tmpl, err := template.New("CSIDriver").Parse(csiDriver) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + err = tmpl.Execute(&buf, values) + if err != nil { + return "", fmt.Errorf("failed to replace values in template: %w", err) + } + + return buf.String(), nil +} diff --git a/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.yaml b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.yaml new file mode 100644 index 000000000..97afbc50e --- /dev/null +++ b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs/csidriver.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "{{ .Name }}" +spec: + attachRequired: false + volumeLifecycleModes: + - Persistent diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..2a5446762 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -36,7 +36,6 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) @@ -319,7 +318,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +341,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +368,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
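Throughout the go-cmp files here, each legacy `// +build` comment gains a matching `//go:build` expression, the constraint syntax introduced in Go 1.17. Until the module drops pre-1.17 toolchains both lines must be present and agree; `gofmt` keeps them in sync, as in this minimal file header:

```go
//go:build !purego
// +build !purego

package value
```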
+//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. 
if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afd..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d4..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 3d45c1a47..c71003463 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -178,7 +178,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } @@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. // // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..76c04fdbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -207,9 +207,10 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, // Check whether this is a []byte of text data. if t.Elem() == reflect.TypeOf(byte(0)) { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) + skipType = true return opts.WithTypeMode(emitType).FormatType(t, out) } } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 168f92f3c..68b5c1ae1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -7,6 +7,7 @@ package cmp import ( "bytes" "fmt" + "math" "reflect" "strconv" "strings" @@ -79,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. 
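The `report_reflect.go` hunk above fixes a genuine logic bug: the old predicate used `&&`, which essentially only matches the ASCII space (the one rune that is both printable and white space), so any `[]byte` containing a newline failed the is-text check. A quick illustration of the corrected predicate (editor's sketch):

```go
package main

import (
	"bytes"
	"fmt"
	"unicode"
)

func main() {
	isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }

	b := []byte("hello\nworld\n")
	// Under the old && predicate the newline satisfied neither branch
	// jointly, so multi-line byte slices were never classified as text.
	fmt.Println(isPrintSpace('\n'))                        // true with ||, false with &&
	fmt.Println(len(bytes.TrimFunc(b, isPrintSpace)) == 0) // true: render as a quoted string
}
```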
- const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -128,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + isPureLinedText = efficiencyLines < 4*efficiencyBytes + } } // Format the string into printable records. @@ -138,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. - case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -229,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. 
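Condensing the classification rewrite above into a standalone sketch (function and variable names are mine, not go-cmp's): a value is treated as mostly text when more than 90% of its runes are printable or white space, and as candidate line-oriented text only when every rune qualifies.

```go
package main

import (
	"fmt"
	"math"
	"unicode"
	"unicode/utf8"
)

// classify mirrors the isMostlyText / isPureText logic above.
func classify(s string) (mostlyText, pureText bool) {
	var total, valid int
	for _, r := range s {
		total++
		if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
			valid++
		}
	}
	pureText = valid == total
	mostlyText = float64(valid) > math.Floor(0.90*float64(total))
	return mostlyText, pureText
}

func main() {
	fmt.Println(classify("line one\nline two\n")) // true true
	fmt.Println(classify("text with a NUL \x00")) // true false (16 of 17 runes valid)
}
```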
- case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -237,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. @@ -299,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isText { + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { @@ -338,8 +354,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -364,6 +383,7 @@ func (opts formatOptions) formatDiffSlice( groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { @@ -416,25 +436,36 @@ func (opts formatOptions) formatDiffSlice( // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { groups = append(groups, diffStats{Name: name}) - prevCase = i + prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: - lastStats(1).NumIdentical++ + lastStats('=').NumIdentical++ case diff.UniqueX: - lastStats(2).NumRemoved++ + lastStats('!').NumRemoved++ case diff.UniqueY: - lastStats(2).NumInserted++ + lastStats('!').NumInserted++ case diff.Modified: - lastStats(2).NumModified++ + lastStats('!').NumModified++ } } return groups @@ -444,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. 
+// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] +// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -463,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat } return groups } + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. +// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +// +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. + if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { + numLeadingIdentical++ + } + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. 
+ if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go index 86595175b..52be1e785 100644 --- a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -2,6 +2,7 @@ package api import ( "context" + "net/http" ) // TokenAuth is used to perform token backend operations on Vault @@ -15,14 +16,19 @@ func (a *Auth) Token() *TokenAuth { } func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/create") + return c.CreateWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create") if err := r.SetJSONBody(opts); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -32,14 +38,19 @@ func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { } func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/create-orphan") + return c.CreateOrphanWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateOrphanWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create-orphan") if err := r.SetJSONBody(opts); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -49,14 +60,19 @@ func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { } func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName) + return c.CreateWithRoleWithContext(context.Background(), opts, roleName) +} + +func (c *TokenAuth) CreateWithRoleWithContext(ctx context.Context, opts *TokenCreateRequest, roleName string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create/"+roleName) if err := r.SetJSONBody(opts); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -66,16 
+82,21 @@ func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (* } func (c *TokenAuth) Lookup(token string) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/lookup") + return c.LookupWithContext(context.Background(), token) +} + +func (c *TokenAuth) LookupWithContext(ctx context.Context, token string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup") if err := r.SetJSONBody(map[string]interface{}{ "token": token, }); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -85,16 +106,21 @@ func (c *TokenAuth) Lookup(token string) (*Secret, error) { } func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor") + return c.LookupAccessorWithContext(context.Background(), accessor) +} + +func (c *TokenAuth) LookupAccessorWithContext(ctx context.Context, accessor string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup-accessor") if err := r.SetJSONBody(map[string]interface{}{ "accessor": accessor, }); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -104,11 +130,16 @@ func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { } func (c *TokenAuth) LookupSelf() (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self") + return c.LookupSelfWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *TokenAuth) LookupSelfWithContext(ctx context.Context) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/auth/token/lookup-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -118,7 +149,14 @@ func (c *TokenAuth) LookupSelf() (*Secret, error) { } func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/renew-accessor") + return c.RenewAccessorWithContext(context.Background(), accessor, increment) +} + +func (c *TokenAuth) RenewAccessorWithContext(ctx context.Context, accessor string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/renew-accessor") if err := r.SetJSONBody(map[string]interface{}{ "accessor": accessor, "increment": increment, @@ -126,9 +164,7 @@ func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, erro return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -138,7 +174,14 @@ func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, erro } func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { - r := 
c.c.NewRequest("PUT", "/v1/auth/token/renew") + return c.RenewWithContext(context.Background(), token, increment) +} + +func (c *TokenAuth) RenewWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew") if err := r.SetJSONBody(map[string]interface{}{ "token": token, "increment": increment, @@ -146,9 +189,7 @@ func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -158,16 +199,21 @@ func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { } func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + return c.RenewSelfWithContext(context.Background(), increment) +} + +func (c *TokenAuth) RenewSelfWithContext(ctx context.Context, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") body := map[string]interface{}{"increment": increment} if err := r.SetJSONBody(body); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -176,10 +222,18 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { return ParseSecret(resp.Body) } -// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided -// token instead of the token attached to the client. +// RenewTokenAsSelf wraps RenewTokenAsSelfWithContext using context.Background. func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + return c.RenewTokenAsSelfWithContext(context.Background(), token, increment) +} + +// RenewTokenAsSelfWithContext behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelfWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") r.ClientToken = token body := map[string]interface{}{"increment": increment} @@ -187,9 +241,7 @@ func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, erro return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -198,19 +250,25 @@ func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, erro return ParseSecret(resp.Body) } -// RevokeAccessor revokes a token associated with the given accessor -// along with all the child tokens. +// RevokeAccessor wraps RevokeAccessorWithContext using context.Background. 
func (c *TokenAuth) RevokeAccessor(accessor string) error { - r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor") + return c.RevokeAccessorWithContext(context.Background(), accessor) +} + +// RevokeAccessorWithContext revokes a token associated with the given accessor +// along with all the child tokens. +func (c *TokenAuth) RevokeAccessorWithContext(ctx context.Context, accessor string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/revoke-accessor") if err := r.SetJSONBody(map[string]interface{}{ "accessor": accessor, }); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -219,19 +277,25 @@ func (c *TokenAuth) RevokeAccessor(accessor string) error { return nil } -// RevokeOrphan revokes a token without revoking the tree underneath it (so -// child tokens are orphaned rather than revoked) +// RevokeOrphan wraps RevokeOrphanWithContext using context.Background. func (c *TokenAuth) RevokeOrphan(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan") + return c.RevokeOrphanWithContext(context.Background(), token) +} + +// RevokeOrphanWithContext revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphanWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-orphan") if err := r.SetJSONBody(map[string]interface{}{ "token": token, }); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -240,15 +304,21 @@ func (c *TokenAuth) RevokeOrphan(token string) error { return nil } -// RevokeSelf revokes the token making the call. The `token` parameter is kept +// RevokeSelf wraps RevokeSelfWithContext using context.Background. +func (c *TokenAuth) RevokeSelf(token string) error { + return c.RevokeSelfWithContext(context.Background(), token) +} + +// RevokeSelfWithContext revokes the token making the call. The `token` parameter is kept // for backwards compatibility but is ignored; only the client's set token has // an effect. -func (c *TokenAuth) RevokeSelf(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") - - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *TokenAuth) RevokeSelfWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -257,20 +327,26 @@ func (c *TokenAuth) RevokeSelf(token string) error { return nil } -// RevokeTree is the "normal" revoke operation that revokes the given token and +// RevokeTree wraps RevokeTreeWithContext using context.Background. 
+func (c *TokenAuth) RevokeTree(token string) error { + return c.RevokeTreeWithContext(context.Background(), token) +} + +// RevokeTreeWithContext is the "normal" revoke operation that revokes the given token and // the entire tree underneath -- all of its child tokens, their child tokens, // etc. -func (c *TokenAuth) RevokeTree(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke") +func (c *TokenAuth) RevokeTreeWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke") if err := r.SetJSONBody(map[string]interface{}{ "token": token, }); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 6a804091a..99813a21b 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -53,6 +53,14 @@ const ( HeaderIndex = "X-Vault-Index" HeaderForward = "X-Vault-Forward" HeaderInconsistent = "X-Vault-Inconsistent" + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + "but the client is configured to use TLS. Please either enable TLS\n" + + "on the server or run the client with -address set to an address\n" + + "that uses the http protocol:\n\n" + + " vault -address http://
\n\n" + + "You can also set the VAULT_ADDR environment variable:\n\n\n" + + " VAULT_ADDR=http://
vault \n\n" + + "where
is replaced by the actual address to the server." ) // Deprecated values @@ -1089,6 +1097,9 @@ func (c *Client) NewRequest(method, requestPath string) *Request { // RawRequest performs the raw request given. This request may be against // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. func (c *Client) RawRequest(r *Request) (*Response, error) { return c.RawRequestWithContext(context.Background(), r) } @@ -1096,7 +1107,19 @@ func (c *Client) RawRequest(r *Request) (*Response, error) { // RawRequestWithContext performs the raw request given. This request may be against // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + // Note: we purposefully do not call cancel manually. The reason is + // when canceled, the request.Body will EOF when reading due to the way + // it streams data in. Cancel will still be run when the timeout is + // hit, so this doesn't really harm anything. + ctx, _ = c.withConfiguredTimeout(ctx) + return c.rawRequestWithContext(ctx, r) +} + +func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { c.modifyLock.RLock() token := c.token @@ -1108,7 +1131,6 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient - timeout := c.config.Timeout outputCurlString := c.config.OutputCurlString logger := c.config.Logger c.config.modifyLock.RUnlock() @@ -1127,12 +1149,9 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon limiter.Wait(ctx) } - // Sanity check the token before potentially erroring from the API - idx := strings.IndexFunc(token, func(c rune) bool { - return !unicode.IsPrint(c) - }) - if idx != -1 { - return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err } redirectCount := 0 @@ -1157,13 +1176,6 @@ START: return nil, LastOutputStringError } - if timeout != 0 { - // Note: we purposefully do not call cancel manually. The reason is - // when canceled, the request.Body will EOF when reading due to the way - // it streams data in. Cancel will still be run when the timeout is - // hit, so this doesn't really harm anything. - ctx, _ = context.WithTimeout(ctx, timeout) - } req.Request = req.Request.WithContext(ctx) if backoff == nil { @@ -1192,17 +1204,7 @@ START: } if err != nil { if strings.Contains(err.Error(), "tls: oversized") { - err = errwrap.Wrapf( - "{{err}}\n\n"+ - "This error usually means that the server is running with TLS disabled\n"+ - "but the client is configured to use TLS. Please either enable TLS\n"+ - "on the server or run the client with -address set to an address\n"+ - "that uses the http protocol:\n\n"+ - " vault -address http://
\n\n"+ - "You can also set the VAULT_ADDR environment variable:\n\n\n"+ - " VAULT_ADDR=http://
vault \n\n"+ - "where
is replaced by the actual address to the server.", - err) + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) } return result, err } @@ -1249,6 +1251,120 @@ START: return result, nil } +// httpRequestWithContext avoids the use of the go-retryable library found in RawRequestWithContext and is +// useful when making calls where a net/http client is desirable. A single redirect (status code 301, 302, +// or 307) will be followed but all retry and timeout logic is the responsibility of the caller as is +// closing the Response body. +func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + req, err := http.NewRequestWithContext(ctx, r.Method, r.URL.RequestURI(), r.Body) + if err != nil { + return nil, err + } + + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + httpClient := c.config.HttpClient + outputCurlString := c.config.OutputCurlString + if c.headers != nil { + for header, vals := range c.headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + + // OutputCurlString logic relies on the request type to be retryable.Request as + if outputCurlString { + return nil, fmt.Errorf("output-curl-string is not implemented for this request") + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + var result *Response + + resp, err := httpClient.Do(req) + + if resp != nil { + result = &Response{Response: resp} + } + + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect + if resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Retry the request + resp, err = httpClient.Do(req) + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + } + + if err := result.Error(); err != nil { + return nil, err + } + + return result, nil +} + type ( RequestCallback func(*Request) ResponseCallback func(*Response) @@ -1278,6 +1394,17 @@ func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client { return &c2 } +// withConfiguredTimeout wraps the context with a timeout from the client configuration. 
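The net effect of the new `withConfiguredTimeout` helper (defined next) is that `Config.Timeout` now bounds every request up front, whether or not the caller supplies its own deadline; previously the timeout was applied ad hoc inside `RawRequestWithContext`. A sketch of how that surfaces to API users, using one of the `*WithContext` helpers added later in this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Timeout = 10 * time.Second // picked up by withConfiguredTimeout on every call

	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Even with a bare background context the request is bounded by
	// cfg.Timeout; a caller-supplied deadline would tighten it further.
	secret, err := client.Logical().ReadWithContext(context.Background(), "secret/data/app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```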
+func (c *Client) withConfiguredTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + timeout := c.ClientTimeout() + + if timeout > 0 { + return context.WithTimeout(ctx, timeout) + } + + return ctx, func() {} +} + // RecordState returns a response callback that will record the state returned // by Vault in a response header. func RecordState(state *string) ResponseCallback { @@ -1466,3 +1593,14 @@ func (w *replicationStateStore) states() []string { copy(c, w.store) return c } + +// validateToken will check for non-printable characters to prevent a call that will fail at the api +func validateToken(t string) error { + idx := strings.IndexFunc(t, func(c rune) bool { + return !unicode.IsPrint(c) + }) + if idx != -1 { + return fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go index 321bd597c..0988ebcd1 100644 --- a/vendor/github.com/hashicorp/vault/api/help.go +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -3,16 +3,23 @@ package api import ( "context" "fmt" + "net/http" ) -// Help reads the help information for the given path. +// Help wraps HelpWithContext using context.Background. func (c *Client) Help(path string) (*Help, error) { - r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) + return c.HelpWithContext(context.Background(), path) +} + +// HelpWithContext reads the help information for the given path. +func (c *Client) HelpWithContext(ctx context.Context, path string) (*Help, error) { + ctx, cancelFunc := c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/%s", path)) r.Params.Add("help", "1") - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.RawRequestWithContext(ctx, r) + resp, err := c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index f7d2b4a40..39d61b96a 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "io" + "net/http" "net/url" "os" "strings" @@ -30,7 +31,7 @@ var ( return os.Getenv(EnvVaultWrapTTL) } - if (operation == "PUT" || operation == "POST") && path == "sys/wrapping/wrap" { + if (operation == http.MethodPut || operation == http.MethodPost) && path == "sys/wrapping/wrap" { return DefaultWrappingTTL } @@ -49,11 +50,22 @@ func (c *Client) Logical() *Logical { } func (c *Logical) Read(path string) (*Secret, error) { - return c.ReadWithData(path, nil) + return c.ReadWithDataWithContext(context.Background(), path, nil) +} + +func (c *Logical) ReadWithContext(ctx context.Context, path string) (*Secret, error) { + return c.ReadWithDataWithContext(ctx, path, nil) } func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/"+path) + return c.ReadWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) var values url.Values for k, v := range data { @@ -69,9 +81,7 @@ func (c *Logical) ReadWithData(path string, data map[string][]string) 
(*Secret, r.Params = values } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } @@ -97,15 +107,20 @@ func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, } func (c *Logical) List(path string) (*Secret, error) { + return c.ListWithContext(context.Background(), path) +} + +func (c *Logical) ListWithContext(ctx context.Context, path string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + r := c.c.NewRequest("LIST", "/v1/"+path) // Set this for broader compatibility, but we use LIST above to be able to // handle the wrapping lookup function - r.Method = "GET" + r.Method = http.MethodGet r.Params.Set("list", "true") - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } @@ -131,10 +146,11 @@ func (c *Logical) List(path string) (*Secret, error) { } func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() + return c.WriteWithContext(context.Background(), path, data) +} - r := c.c.NewRequest("PUT", "/v1/"+path) +func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) if err := r.SetJSONBody(data); err != nil { return nil, err } @@ -143,7 +159,7 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro } func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { - r := c.c.NewRequest("PATCH", "/v1/"+path) + r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) r.Headers.Set("Content-Type", "application/merge-patch+json") if err := r.SetJSONBody(data); err != nil { return nil, err @@ -153,14 +169,21 @@ func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[stri } func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/"+path) + return c.WriteBytesWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteBytesWithContext(ctx context.Context, path string, data []byte) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) r.BodyBytes = data - return c.write(context.Background(), path, r) + return c.write(ctx, path, r) } func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) { - resp, err := c.c.RawRequestWithContext(ctx, request) + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) if resp != nil { defer resp.Body.Close() } @@ -185,11 +208,22 @@ func (c *Logical) write(ctx context.Context, path string, request *Request) (*Se } func (c *Logical) Delete(path string) (*Secret, error) { - return c.DeleteWithData(path, nil) + return c.DeleteWithContext(context.Background(), path) +} + +func (c *Logical) DeleteWithContext(ctx context.Context, path string) (*Secret, error) { + return c.DeleteWithDataWithContext(ctx, path, nil) } func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret, error) { - r := c.c.NewRequest("DELETE", "/v1/"+path) + return 
c.DeleteWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) DeleteWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/"+path) var values url.Values for k, v := range data { @@ -205,9 +239,7 @@ func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret r.Params = values } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } @@ -232,6 +264,13 @@ func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret } func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + return c.UnwrapWithContext(context.Background(), wrappingToken) +} + +func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + var data map[string]interface{} wt := strings.TrimSpace(wrappingToken) if wrappingToken != "" { @@ -244,14 +283,12 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { } } - r := c.c.NewRequest("PUT", "/v1/sys/wrapping/unwrap") + r := c.c.NewRequest(http.MethodPut, "/v1/sys/wrapping/unwrap") if err := r.SetJSONBody(data); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go index b30c06eee..9129ea0c3 100644 --- a/vendor/github.com/hashicorp/vault/api/output_string.go +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "net/http" "strings" retryablehttp "github.com/hashicorp/go-retryablehttp" @@ -45,7 +46,7 @@ func (d *OutputStringError) parseRequest() { if d.TLSSkipVerify { d.parsedCurlString += "--insecure " } - if d.Request.Method != "GET" { + if d.Request.Method != http.MethodGet { d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method) } if d.ClientCACert != "" { diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go index 9acd6a58a..e7da60cc5 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -1,6 +1,7 @@ package api import ( + "context" "crypto/tls" "crypto/x509" "encoding/base64" @@ -67,9 +68,14 @@ func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { return nil } -// VaultPluginTLSProvider is run inside a plugin and retrieves the response -// wrapped TLS certificate from vault. It returns a configured TLS Config. +// VaultPluginTLSProvider wraps VaultPluginTLSProviderContext using context.Background. func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + return VaultPluginTLSProviderContext(context.Background(), apiTLSConfig) +} + +// VaultPluginTLSProviderContext is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. 
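The whole vendor bump follows one mechanical pattern: every blocking helper in the Vault API package gains a *WithContext twin, the legacy method becomes a thin wrapper that passes context.Background(), and withConfiguredTimeout derives a child context that also honors the client-level timeout (the effective deadline is whichever of the two expires first). A minimal caller-side sketch, assuming a reachable Vault server; the KV path "secret/data/app" is illustrative:

package main

import (
	"context"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Caller-supplied deadline; withConfiguredTimeout additionally caps it
	// with the client's configured timeout, if one is set.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	secret, err := client.Logical().ReadWithContext(ctx, "secret/data/app")
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		log.Printf("read %d fields", len(secret.Data))
	}
}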
+func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { if os.Getenv(PluginMetadataModeEnv) == "true" { return nil } @@ -121,7 +127,7 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) // Reset token value to make sure nothing has been set by default client.ClearToken() - secret, err := client.Logical().Unwrap(unwrapToken) + secret, err := client.Logical().UnwrapWithContext(ctx, unwrapToken) if err != nil { return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) } diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go index 837eac4ff..b832e2748 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh.go +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -3,6 +3,7 @@ package api import ( "context" "fmt" + "net/http" ) // SSH is used to return a client to invoke operations on SSH backend. @@ -24,16 +25,22 @@ func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { } } -// Credential invokes the SSH backend API to create a credential to establish an SSH session. +// Credential wraps CredentialWithContext using context.Background. func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + return c.CredentialWithContext(context.Background(), role, data) +} + +// CredentialWithContext invokes the SSH backend API to create a credential to establish an SSH session. +func (c *SSH) CredentialWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) if err := r.SetJSONBody(data); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -42,17 +49,23 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err return ParseSecret(resp.Body) } -// SignKey signs the given public key and returns a signed public key to pass -// along with the SSH request. +// SignKey wraps SignKeyWithContext using context.Background. func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + return c.SignKeyWithContext(context.Background(), role, data) +} + +// SignKeyWithContext signs the given public key and returns a signed public key to pass +// along with the SSH request. 
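The SSH helpers follow the same convention. A sketch of signing a public key under a deadline, assuming a client built as in the earlier sketch; the mount point "ssh-client-signer", the role "ops", and the "public_key" payload field are illustrative values:

import (
	"context"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func signKey(client *vault.Client, pub string) (*vault.Secret, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Sign against the role's signing endpoint on the given mount.
	return client.SSHWithMountPoint("ssh-client-signer").SignKeyWithContext(ctx, "ops",
		map[string]interface{}{
			"public_key": pub,
		})
}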
+func (c *SSH) SignKeyWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) if err := r.SetJSONBody(data); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go index fda70bcdd..505519b04 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -6,6 +6,7 @@ import ( "crypto/x509" "fmt" "io/ioutil" + "net/http" "os" "github.com/hashicorp/errwrap" @@ -206,18 +207,24 @@ func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { // an echo response message is returned. This feature is used by ssh-helper to verify if // its configured correctly. func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + return c.VerifyWithContext(context.Background(), otp) +} + +// VerifyWithContext the same as Verify but with a custom context. +func (c *SSHHelper) VerifyWithContext(ctx context.Context, otp string) (*SSHVerifyResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + data := map[string]interface{}{ "otp": otp, } verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) - r := c.c.NewRequest("PUT", verifyPath) + r := c.c.NewRequest(http.MethodPut, verifyPath) if err := r.SetJSONBody(data); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go index d0c640836..7020256f4 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_audit.go +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -4,23 +4,29 @@ import ( "context" "errors" "fmt" + "net/http" "github.com/mitchellh/mapstructure" ) func (c *Sys) AuditHash(path string, input string) (string, error) { + return c.AuditHashWithContext(context.Background(), path, input) +} + +func (c *Sys) AuditHashWithContext(ctx context.Context, path string, input string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]interface{}{ "input": input, } - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit-hash/%s", path)) if err := r.SetJSONBody(body); err != nil { return "", err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return "", err } @@ -47,11 +53,16 @@ func (c *Sys) AuditHash(path string, input string) (string, error) { } func (c *Sys) ListAudit() (map[string]*Audit, error) { - r := c.c.NewRequest("GET", "/v1/sys/audit") + return c.ListAuditWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, error) { + 
ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/audit") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -85,14 +96,19 @@ func (c *Sys) EnableAudit( } func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) + return c.EnableAuditWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuditWithOptionsWithContext(ctx context.Context, path string, options *EnableAuditOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit/%s", path)) if err := r.SetJSONBody(options); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -102,11 +118,16 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e } func (c *Sys) DisableAudit(path string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) + return c.DisableAuditWithContext(context.Background(), path) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) DisableAuditWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/audit/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go index 46abae4ef..238bd5e46 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_auth.go +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -4,16 +4,22 @@ import ( "context" "errors" "fmt" + "net/http" "github.com/mitchellh/mapstructure" ) func (c *Sys) ListAuth() (map[string]*AuthMount, error) { - r := c.c.NewRequest("GET", "/v1/sys/auth") + return c.ListAuthWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) ListAuthWithContext(ctx context.Context) (map[string]*AuthMount, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/auth") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -45,14 +51,19 @@ func (c *Sys) EnableAuth(path, authType, desc string) error { } func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path)) + return c.EnableAuthWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuthWithOptionsWithContext(ctx context.Context, path string, options *EnableAuthOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/auth/%s", path)) if err := r.SetJSONBody(options); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := 
c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -62,11 +73,16 @@ func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) err } func (c *Sys) DisableAuth(path string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path)) + return c.DisableAuthWithContext(context.Background(), path) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) DisableAuthWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/auth/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go index 64b3951dd..af306a07f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -4,15 +4,30 @@ import ( "context" "errors" "fmt" + "net/http" "github.com/mitchellh/mapstructure" ) func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { - return c.Capabilities(c.c.Token(), path) + return c.CapabilitiesSelfWithContext(context.Background(), path) +} + +func (c *Sys) CapabilitiesSelfWithContext(ctx context.Context, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.CapabilitiesWithContext(ctx, c.c.Token(), path) } func (c *Sys) Capabilities(token, path string) ([]string, error) { + return c.CapabilitiesWithContext(context.Background(), token, path) +} + +func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]string{ "token": token, "path": path, @@ -23,14 +38,12 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) { reqPath = fmt.Sprintf("%s-self", reqPath) } - r := c.c.NewRequest("POST", reqPath) + r := c.c.NewRequest(http.MethodPost, reqPath) if err := r.SetJSONBody(body); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go index ef136dcbb..1e2cda4f4 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -3,16 +3,22 @@ package api import ( "context" "errors" + "net/http" "github.com/mitchellh/mapstructure" ) func (c *Sys) CORSStatus() (*CORSResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/config/cors") + return c.CORSStatusWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) CORSStatusWithContext(ctx context.Context) (*CORSResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -36,14 +42,19 @@ func (c *Sys) CORSStatus() (*CORSResponse, error) { } func (c *Sys) ConfigureCORS(req *CORSRequest) 
error { - r := c.c.NewRequest("PUT", "/v1/sys/config/cors") + return c.ConfigureCORSWithContext(context.Background(), req) +} + +func (c *Sys) ConfigureCORSWithContext(ctx context.Context, req *CORSRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/config/cors") if err := r.SetJSONBody(req); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -51,11 +62,16 @@ func (c *Sys) ConfigureCORS(req *CORSRequest) error { } func (c *Sys) DisableCORS() error { - r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") + return c.DisableCORSWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) DisableCORSWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go index 870dacb09..096cadb79 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -1,25 +1,41 @@ package api -import "context" +import ( + "context" + "net/http" +) func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { - return c.generateRootStatusCommon("/v1/sys/generate-root/attempt") + return c.GenerateRootStatusWithContext(context.Background()) } func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { - return c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") + return c.GenerateDROperationTokenStatusWithContext(context.Background()) } func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { - return c.generateRootStatusCommon("/v1/sys/generate-recovery-token/attempt") + return c.GenerateRecoveryOperationTokenStatusWithContext(context.Background()) } -func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { - r := c.c.NewRequest("GET", path) +func (c *Sys) GenerateRootStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) GenerateDROperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootStatusCommonWithContext(ctx context.Context, path string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := 
c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -31,31 +47,44 @@ func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse } func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { - return c.generateRootInitCommon("/v1/sys/generate-root/attempt", otp, pgpKey) + return c.GenerateRootInitWithContext(context.Background(), otp, pgpKey) } func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { - return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) + return c.GenerateDROperationTokenInitWithContext(context.Background(), otp, pgpKey) } func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { - return c.generateRootInitCommon("/v1/sys/generate-recovery-token/attempt", otp, pgpKey) + return c.GenerateRecoveryOperationTokenInitWithContext(context.Background(), otp, pgpKey) } -func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { +func (c *Sys) GenerateRootInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommonWithContext(ctx context.Context, path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]interface{}{ "otp": otp, "pgp_key": pgpKey, } - r := c.c.NewRequest("PUT", path) + r := c.c.NewRequest(http.MethodPut, path) if err := r.SetJSONBody(body); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -67,23 +96,36 @@ func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootSta } func (c *Sys) GenerateRootCancel() error { - return c.generateRootCancelCommon("/v1/sys/generate-root/attempt") + return c.GenerateRootCancelWithContext(context.Background()) } func (c *Sys) GenerateDROperationTokenCancel() error { - return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") + return c.GenerateDROperationTokenCancelWithContext(context.Background()) } func (c *Sys) GenerateRecoveryOperationTokenCancel() error { - return c.generateRootCancelCommon("/v1/sys/generate-recovery-token/attempt") + return c.GenerateRecoveryOperationTokenCancelWithContext(context.Background()) } -func (c *Sys) generateRootCancelCommon(path string) error { - r := c.c.NewRequest("DELETE", path) +func (c *Sys) GenerateRootCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} - ctx, cancelFunc := 
context.WithCancel(context.Background()) +func (c *Sys) GenerateDROperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootCancelCommonWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -91,31 +133,44 @@ func (c *Sys) generateRootCancelCommon(path string) error { } func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { - return c.generateRootUpdateCommon("/v1/sys/generate-root/update", shard, nonce) + return c.GenerateRootUpdateWithContext(context.Background(), shard, nonce) } func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { - return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) + return c.GenerateDROperationTokenUpdateWithContext(context.Background(), shard, nonce) } func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { - return c.generateRootUpdateCommon("/v1/sys/generate-recovery-token/update", shard, nonce) + return c.GenerateRecoveryOperationTokenUpdateWithContext(context.Background(), shard, nonce) } -func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) { +func (c *Sys) GenerateRootUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-recovery-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommonWithContext(ctx context.Context, path, shard, nonce string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]interface{}{ "key": shard, "nonce": nonce, } - r := c.c.NewRequest("PUT", path) + r := c.c.NewRequest(http.MethodPut, path) if err := r.SetJSONBody(body); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go index 408da0509..35bf40336 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -2,15 +2,21 @@ 
package api import ( "context" + "net/http" "time" ) func (c *Sys) HAStatus() (*HAStatusResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/ha-status") + return c.HAStatusWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) HAStatusWithContext(ctx context.Context) (*HAStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/ha-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go index d5d779600..953c1c21e 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_health.go +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -1,9 +1,19 @@ package api -import "context" +import ( + "context" + "net/http" +) func (c *Sys) Health() (*HealthResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/health") + return c.HealthWithContext(context.Background()) +} + +func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/health") // If the code is 400 or above it will automatically turn into an error, // but the sys/health API defaults to returning 5xx when not sealed or // inited, so we force this code to be something else so we parse correctly @@ -13,9 +23,7 @@ func (c *Sys) Health() (*HealthResponse, error) { r.Params.Add("drsecondarycode", "299") r.Params.Add("performancestandbycode", "299") - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go index 0e499c6e3..05dea86f6 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_init.go +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -1,13 +1,21 @@ package api -import "context" +import ( + "context" + "net/http" +) func (c *Sys) InitStatus() (bool, error) { - r := c.c.NewRequest("GET", "/v1/sys/init") + return c.InitStatusWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) InitStatusWithContext(ctx context.Context) (bool, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return false, err } @@ -19,14 +27,19 @@ func (c *Sys) InitStatus() (bool, error) { } func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { - r := c.c.NewRequest("PUT", "/v1/sys/init") + return c.InitWithContext(context.Background(), opts) +} + +func (c *Sys) InitWithContext(ctx context.Context, opts *InitRequest) (*InitResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/init") if err := r.SetJSONBody(opts); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, 
err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go index 1c6be8d88..a74e206eb 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leader.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -2,15 +2,21 @@ package api import ( "context" + "net/http" "time" ) func (c *Sys) Leader() (*LeaderResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/leader") + return c.LeaderWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) LeaderWithContext(ctx context.Context) (*LeaderResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/leader") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go index e018015de..c02402f53 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leases.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -3,10 +3,18 @@ package api import ( "context" "errors" + "net/http" ) func (c *Sys) Renew(id string, increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") + return c.RenewWithContext(context.Background(), id, increment) +} + +func (c *Sys) RenewWithContext(ctx context.Context, id string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/renew") body := map[string]interface{}{ "increment": increment, @@ -16,9 +24,7 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -28,7 +34,14 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { } func (c *Sys) Lookup(id string) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/sys/leases/lookup") + return c.LookupWithContext(context.Background(), id) +} + +func (c *Sys) LookupWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/lookup") body := map[string]interface{}{ "lease_id": id, @@ -37,9 +50,7 @@ func (c *Sys) Lookup(id string) (*Secret, error) { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -49,7 +60,14 @@ func (c *Sys) Lookup(id string) (*Secret, error) { } func (c *Sys) Revoke(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke") + return c.RevokeWithContext(context.Background(), id) +} + +func (c *Sys) RevokeWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke") body := map[string]interface{}{ "lease_id": id, } @@ -57,9 +75,7 @@ func (c *Sys) Revoke(id string) error { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + 
resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -67,11 +83,16 @@ func (c *Sys) Revoke(id string) error { } func (c *Sys) RevokePrefix(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) + return c.RevokePrefixWithContext(context.Background(), id) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) RevokePrefixWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-prefix/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -79,11 +100,16 @@ func (c *Sys) RevokePrefix(id string) error { } func (c *Sys) RevokeForce(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) + return c.RevokeForceWithContext(context.Background(), id) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) RevokeForceWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-force/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -91,6 +117,13 @@ func (c *Sys) RevokeForce(id string) error { } func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + return c.RevokeWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) RevokeWithOptionsWithContext(ctx context.Context, opts *RevokeOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + if opts == nil { return errors.New("nil options provided") } @@ -105,7 +138,7 @@ func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { } path += opts.LeaseID - r := c.c.NewRequest("PUT", path) + r := c.c.NewRequest(http.MethodPut, path) if !opts.Force { body := map[string]interface{}{ "sync": opts.Sync, @@ -115,9 +148,7 @@ func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { } } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go b/vendor/github.com/hashicorp/vault/api/sys_monitor.go index ec27f2285..df2774672 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_monitor.go +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -4,12 +4,13 @@ import ( "bufio" "context" "fmt" + "net/http" ) // Monitor returns a channel that outputs strings containing the log messages // coming from the server. 
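All of the lease helpers above (Renew, Lookup, Revoke, RevokePrefix, RevokeForce, RevokeWithOptions) gain the same context plumbing, so a slow revocation can now be abandoned instead of blocking indefinitely. A sketch, assuming a client built as in the first example:

import (
	"context"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func revokeLease(client *vault.Client, leaseID string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// PUT /v1/sys/leases/revoke, bounded by the 30s deadline above.
	return client.Sys().RevokeWithContext(ctx, leaseID)
}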
func (c *Sys) Monitor(ctx context.Context, logLevel string) (chan string, error) { - r := c.c.NewRequest("GET", "/v1/sys/monitor") + r := c.c.NewRequest(http.MethodGet, "/v1/sys/monitor") if logLevel == "" { r.Params.Add("log_level", "info") diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index 8a0c5b985..52f51139f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -4,17 +4,23 @@ import ( "context" "errors" "fmt" + "net/http" "time" "github.com/mitchellh/mapstructure" ) func (c *Sys) ListMounts() (map[string]*MountOutput, error) { - r := c.c.NewRequest("GET", "/v1/sys/mounts") + return c.ListMountsWithContext(context.Background()) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) ListMountsWithContext(ctx context.Context) (map[string]*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/mounts") + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -38,14 +44,19 @@ func (c *Sys) ListMounts() (map[string]*MountOutput, error) { } func (c *Sys) Mount(path string, mountInfo *MountInput) error { - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) + return c.MountWithContext(context.Background(), path, mountInfo) +} + +func (c *Sys) MountWithContext(ctx context.Context, path string, mountInfo *MountInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s", path)) if err := r.SetJSONBody(mountInfo); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -55,27 +66,37 @@ func (c *Sys) Mount(path string, mountInfo *MountInput) error { } func (c *Sys) Unmount(path string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) + return c.UnmountWithContext(context.Background(), path) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) UnmountWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } -// Remount kicks off a remount operation, polls the status endpoint using -// the migration ID till either success or failure state is observed +// Remount wraps RemountWithContext using context.Background. 
func (c *Sys) Remount(from, to string) error { - remountResp, err := c.StartRemount(from, to) + return c.RemountWithContext(context.Background(), from, to) +} + +// RemountWithContext kicks off a remount operation, polls the status endpoint using +// the migration ID till either success or failure state is observed +func (c *Sys) RemountWithContext(ctx context.Context, from, to string) error { + remountResp, err := c.StartRemountWithContext(ctx, from, to) if err != nil { return err } for { - remountStatusResp, err := c.RemountStatus(remountResp.MigrationID) + remountStatusResp, err := c.RemountStatusWithContext(ctx, remountResp.MigrationID) if err != nil { return err } @@ -89,21 +110,27 @@ func (c *Sys) Remount(from, to string) error { } } -// StartRemount kicks off a mount migration and returns a response with the migration ID +// StartRemount wraps StartRemountWithContext using context.Background. func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { + return c.StartRemountWithContext(context.Background(), from, to) +} + +// StartRemountWithContext kicks off a mount migration and returns a response with the migration ID +func (c *Sys) StartRemountWithContext(ctx context.Context, from, to string) (*MountMigrationOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]interface{}{ "from": from, "to": to, } - r := c.c.NewRequest("POST", "/v1/sys/remount") + r := c.c.NewRequest(http.MethodPost, "/v1/sys/remount") if err := r.SetJSONBody(body); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -125,13 +152,19 @@ func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { return &result, err } -// RemountStatus checks the status of a mount migration operation with the provided ID +// RemountStatus wraps RemountStatusWithContext using context.Background. 
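Because RemountWithContext polls RemountStatusWithContext in a loop until the migration reports success or failure, a caller-supplied deadline now bounds the entire migration wait, not just the initial request. A sketch with illustrative mount names:

import (
	"context"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func moveMount(client *vault.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Starts /v1/sys/remount, then polls /v1/sys/remount/status/<id>
	// until the migration succeeds, fails, or the context expires.
	return client.Sys().RemountWithContext(ctx, "secret", "kv-new")
}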
func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) + return c.RemountStatusWithContext(context.Background(), migrationID) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +// RemountStatusWithContext checks the status of a mount migration operation with the provided ID +func (c *Sys) RemountStatusWithContext(ctx context.Context, migrationID string) (*MountMigrationStatusOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -154,14 +187,19 @@ func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, er } func (c *Sys) TuneMount(path string, config MountConfigInput) error { - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + return c.TuneMountWithContext(context.Background(), path, config) +} + +func (c *Sys) TuneMountWithContext(ctx context.Context, path string, config MountConfigInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) if err := r.SetJSONBody(config); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -169,11 +207,16 @@ func (c *Sys) TuneMount(path string, config MountConfigInput) error { } func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + return c.MountConfigWithContext(context.Background(), path) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) MountConfigWithContext(ctx context.Context, path string) (*MountConfigOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index c17072d95..920af4c3c 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -29,14 +29,22 @@ type ListPluginsResponse struct { Names []string `json:"names"` } -// ListPlugins lists all plugins in the catalog and returns their names as a -// list of strings. +// ListPlugins wraps ListPluginsWithContext using context.Background. func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + return c.ListPluginsWithContext(context.Background(), i) +} + +// ListPluginsWithContext lists all plugins in the catalog and returns their names as a +// list of strings. 
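Catalog listing keeps its dual behavior (a plain GET for the untyped catalog, the LIST verb with a list=true fallback for a specific type), now under the caller's context. A hedged sketch; depending on the server version the names may arrive in the Names field shown above or in a per-type map, so only Names is used here:

import (
	"context"

	vault "github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

func secretsPlugins(ctx context.Context, client *vault.Client) ([]string, error) {
	resp, err := client.Sys().ListPluginsWithContext(ctx, &vault.ListPluginsInput{
		Type: consts.PluginTypeSecrets,
	})
	if err != nil {
		return nil, err
	}
	return resp.Names, nil
}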
+func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (*ListPluginsResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := "" method := "" if i.Type == consts.PluginTypeUnknown { path = "/v1/sys/plugins/catalog" - method = "GET" + method = http.MethodGet } else { path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Type) method = "LIST" @@ -46,13 +54,11 @@ func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { if method == "LIST" { // Set this for broader compatibility, but we use LIST above to be able // to handle the wrapping lookup function - req.Method = "GET" + req.Method = http.MethodGet req.Params.Set("list", "true") } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err != nil && resp == nil { return nil, err } @@ -66,7 +72,7 @@ func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { // switch it to a LIST. if resp.StatusCode == 405 { req.Params.Set("list", "true") - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err != nil { return nil, err } @@ -142,14 +148,20 @@ type GetPluginResponse struct { SHA256 string `json:"sha256"` } -// GetPlugin retrieves information about the plugin. +// GetPlugin wraps GetPluginWithContext using context.Background. func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + return c.GetPluginWithContext(context.Background(), i) +} + +// GetPluginWithContext retrieves information about the plugin. +func (c *Sys) GetPluginWithContext(ctx context.Context, i *GetPluginInput) (*GetPluginResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := catalogPathByType(i.Type, i.Name) req := c.c.NewRequest(http.MethodGet, path) - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err != nil { return nil, err } @@ -183,8 +195,16 @@ type RegisterPluginInput struct { SHA256 string `json:"sha256,omitempty"` } -// RegisterPlugin registers the plugin with the given information. +// RegisterPlugin wraps RegisterPluginWithContext using context.Background. func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + return c.RegisterPluginWithContext(context.Background(), i) +} + +// RegisterPluginWithContext registers the plugin with the given information. +func (c *Sys) RegisterPluginWithContext(ctx context.Context, i *RegisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := catalogPathByType(i.Type, i.Name) req := c.c.NewRequest(http.MethodPut, path) @@ -192,9 +212,7 @@ func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err == nil { defer resp.Body.Close() } @@ -210,15 +228,21 @@ type DeregisterPluginInput struct { Type consts.PluginType `json:"type"` } -// DeregisterPlugin removes the plugin with the given name from the plugin -// catalog. +// DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. 
func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + return c.DeregisterPluginWithContext(context.Background(), i) +} + +// DeregisterPluginWithContext removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := catalogPathByType(i.Type, i.Name) req := c.c.NewRequest(http.MethodDelete, path) - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err == nil { defer resp.Body.Close() } @@ -237,9 +261,17 @@ type ReloadPluginInput struct { Scope string `json:"scope"` } -// ReloadPlugin reloads mounted plugin backends, possibly returning -// reloadId for a cluster scoped reload +// ReloadPlugin wraps ReloadPluginWithContext using context.Background. func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { + return c.ReloadPluginWithContext(context.Background(), i) +} + +// ReloadPluginWithContext reloads mounted plugin backends, possibly returning +// reloadId for a cluster scoped reload +func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := "/v1/sys/plugins/reload/backend" req := c.c.NewRequest(http.MethodPut, path) @@ -247,10 +279,7 @@ func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { return "", err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err != nil { return "", err } @@ -287,16 +316,21 @@ type ReloadPluginStatusInput struct { ReloadID string `json:"reload_id"` } -// ReloadPluginStatus retrieves the status of a reload operation +// ReloadPluginStatus wraps ReloadPluginStatusWithContext using context.Background. 
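Registration, deregistration, and reload all take the same shape. A sketch of registering a plugin binary under a deadline; the plugin name and command are illustrative, and the Name/Type/Command fields are assumed from the upstream RegisterPluginInput struct, which is only partially visible in this hunk:

import (
	"context"
	"time"

	vault "github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

func registerPlugin(client *vault.Client, sha256hex string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	return client.Sys().RegisterPluginWithContext(ctx, &vault.RegisterPluginInput{
		Name:    "my-secrets",          // illustrative catalog name
		Type:    consts.PluginTypeSecrets,
		Command: "my-secrets-plugin",   // illustrative binary name
		SHA256:  sha256hex,
	})
}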
func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + return c.ReloadPluginStatusWithContext(context.Background(), reloadStatusInput) +} + +// ReloadPluginStatusWithContext retrieves the status of a reload operation +func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + path := "/v1/sys/plugins/reload/backend/status" req := c.c.NewRequest(http.MethodGet, path) req.Params.Add("reload_id", reloadStatusInput.ReloadID) - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - resp, err := c.c.RawRequestWithContext(ctx, req) + resp, err := c.c.rawRequestWithContext(ctx, req) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go index c0c239f96..4a4f91b08 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_policy.go +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -4,20 +4,26 @@ import ( "context" "errors" "fmt" + "net/http" "github.com/mitchellh/mapstructure" ) func (c *Sys) ListPolicies() ([]string, error) { + return c.ListPoliciesWithContext(context.Background()) +} + +func (c *Sys) ListPoliciesWithContext(ctx context.Context) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") // Set this for broader compatibility, but we use LIST above to be able to // handle the wrapping lookup function - r.Method = "GET" + r.Method = http.MethodGet r.Params.Set("list", "true") - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -41,11 +47,16 @@ func (c *Sys) ListPolicies() ([]string, error) { } func (c *Sys) GetPolicy(name string) (string, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + return c.GetPolicyWithContext(context.Background(), name) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) GetPolicyWithContext(ctx context.Context, name string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() if resp.StatusCode == 404 { @@ -72,18 +83,23 @@ func (c *Sys) GetPolicy(name string) (string, error) { } func (c *Sys) PutPolicy(name, rules string) error { + return c.PutPolicyWithContext(context.Background(), name, rules) +} + +func (c *Sys) PutPolicyWithContext(ctx context.Context, name, rules string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + body := map[string]string{ "policy": rules, } - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) if err := r.SetJSONBody(body); err != nil { return err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return err } @@ -93,11 +109,16 @@ func 
(c *Sys) PutPolicy(name, rules string) error { } func (c *Sys) DeletePolicy(name string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + return c.DeletePolicyWithContext(context.Background(), name) +} - ctx, cancelFunc := context.WithCancel(context.Background()) +func (c *Sys) DeletePolicyWithContext(ctx context.Context, name string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index cbf3a2020..df10bf672 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "io" "io/ioutil" "net/http" @@ -14,7 +13,6 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/mapstructure" ) @@ -110,18 +108,24 @@ type AutopilotServer struct { Meta map[string]string `mapstructure:"meta"` } -// RaftJoin adds the node from which this call is invoked from to the raft -// cluster represented by the leader address in the parameter. +// RaftJoin wraps RaftJoinWithContext using context.Background. func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { - r := c.c.NewRequest("POST", "/v1/sys/storage/raft/join") + return c.RaftJoinWithContext(context.Background(), opts) +} + +// RaftJoinWithContext adds the node from which this call is invoked from to the raft +// cluster represented by the leader address in the parameter. +func (c *Sys) RaftJoinWithContext(ctx context.Context, opts *RaftJoinRequest) (*RaftJoinResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/join") if err := r.SetJSONBody(opts); err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - resp, err := c.c.RawRequestWithContext(ctx, r) + resp, err := c.c.rawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -132,87 +136,22 @@ func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { return &result, err } -// RaftSnapshot invokes the API that takes the snapshot of the raft cluster and -// writes it to the supplied io.Writer. +// RaftSnapshot wraps RaftSnapshotWithContext using context.Background. func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { - r := c.c.NewRequest("GET", "/v1/sys/storage/raft/snapshot") + return c.RaftSnapshotWithContext(context.Background(), snapWriter) +} + +// RaftSnapshotWithContext invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. 
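This is the largest functional change in the vendored files: the hand-rolled http.Request construction, header copying, and single-redirect handling are dropped in favor of the shared httpRequestWithContext helper, which (per the removed comment) performs the request without pre-reading the response body. Streaming a snapshot to disk then reduces to the following sketch, with an illustrative file name:

import (
	"context"
	"os"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func saveSnapshot(client *vault.Client) error {
	f, err := os.Create("vault-raft.snap")
	if err != nil {
		return err
	}
	defer f.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	// Streams GET /v1/sys/storage/raft/snapshot into the file.
	return client.Sys().RaftSnapshotWithContext(ctx, f)
}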
+func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) error {
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/snapshot")
 	r.URL.RawQuery = r.Params.Encode()
 
-	req, err := http.NewRequest(http.MethodGet, r.URL.RequestURI(), nil)
+	resp, err := c.c.httpRequestWithContext(ctx, r)
 	if err != nil {
 		return err
 	}
-
-	req.URL.User = r.URL.User
-	req.URL.Scheme = r.URL.Scheme
-	req.URL.Host = r.URL.Host
-	req.Host = r.URL.Host
-
-	if r.Headers != nil {
-		for header, vals := range r.Headers {
-			for _, val := range vals {
-				req.Header.Add(header, val)
-			}
-		}
-	}
-
-	if len(r.ClientToken) != 0 {
-		req.Header.Set(consts.AuthHeaderName, r.ClientToken)
-	}
-
-	if len(r.WrapTTL) != 0 {
-		req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL)
-	}
-
-	if len(r.MFAHeaderVals) != 0 {
-		for _, mfaHeaderVal := range r.MFAHeaderVals {
-			req.Header.Add("X-Vault-MFA", mfaHeaderVal)
-		}
-	}
-
-	if r.PolicyOverride {
-		req.Header.Set("X-Vault-Policy-Override", "true")
-	}
-
-	// Avoiding the use of RawRequestWithContext which reads the response body
-	// to determine if the body contains error message.
-	var result *Response
-	resp, err := c.c.config.HttpClient.Do(req)
-	if err != nil {
-		return err
-	}
-
-	if resp == nil {
-		return nil
-	}
-
-	// Check for a redirect, only allowing for a single redirect
-	if resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307 {
-		// Parse the updated location
-		respLoc, err := resp.Location()
-		if err != nil {
-			return err
-		}
-
-		// Ensure a protocol downgrade doesn't happen
-		if req.URL.Scheme == "https" && respLoc.Scheme != "https" {
-			return fmt.Errorf("redirect would cause protocol downgrade")
-		}
-
-		// Update the request
-		req.URL = respLoc
-
-		// Retry the request
-		resp, err = c.c.config.HttpClient.Do(req)
-		if err != nil {
-			return err
-		}
-	}
-
-	result = &Response{Response: resp}
-	if err := result.Error(); err != nil {
-		return err
-	}
+	defer resp.Body.Close()
 
 	// Make sure that the last file in the archive, SHA256SUMS.sealed, is present
 	// and non-empty.  This is to catch cases where the snapshot failed midstream,
@@ -271,20 +210,23 @@ func (c *Sys) RaftSnapshot(snapWriter io.Writer) error {
 	return nil
 }
 
-// RaftSnapshotRestore reads the snapshot from the io.Reader and installs that
-// snapshot, returning the cluster to the state defined by it.
+// RaftSnapshotRestore wraps RaftSnapshotRestoreWithContext using context.Background.
 func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error {
+	return c.RaftSnapshotRestoreWithContext(context.Background(), snapReader, force)
+}
+
+// RaftSnapshotRestoreWithContext reads the snapshot from the io.Reader and installs that
+// snapshot, returning the cluster to the state defined by it.
+func (c *Sys) RaftSnapshotRestoreWithContext(ctx context.Context, snapReader io.Reader, force bool) error {
 	path := "/v1/sys/storage/raft/snapshot"
 	if force {
 		path = "/v1/sys/storage/raft/snapshot-force"
 	}
-	r := c.c.NewRequest("POST", path)
+
+	r := c.c.NewRequest(http.MethodPost, path)
 	r.Body = snapReader
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.httpRequestWithContext(ctx, r)
 	if err != nil {
 		return err
 	}
@@ -293,13 +235,19 @@ func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error {
 	return nil
 }
 
-// RaftAutopilotState returns the state of the raft cluster as seen by autopilot.
+// RaftAutopilotState wraps RaftAutopilotStateWithContext using context.Background.
 func (c *Sys) RaftAutopilotState() (*AutopilotState, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/storage/raft/autopilot/state")
+	return c.RaftAutopilotStateWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+// RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot.
+func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if resp != nil {
 		defer resp.Body.Close()
 		if resp.StatusCode == 404 {
@@ -327,13 +275,19 @@ func (c *Sys) RaftAutopilotState() (*AutopilotState, error) {
 	return &result, err
 }
 
-// RaftAutopilotConfiguration fetches the autopilot config.
+// RaftAutopilotConfiguration wraps RaftAutopilotConfigurationWithContext using context.Background.
 func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/storage/raft/autopilot/configuration")
+	return c.RaftAutopilotConfigurationWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+// RaftAutopilotConfigurationWithContext fetches the autopilot config.
+func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if resp != nil {
 		defer resp.Body.Close()
 		if resp.StatusCode == 404 {
@@ -369,17 +323,23 @@ func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) {
 	return &result, err
 }
 
-// PutRaftAutopilotConfiguration allows modifying the raft autopilot configuration
+// PutRaftAutopilotConfiguration wraps PutRaftAutopilotConfigurationWithContext using context.Background.
 func (c *Sys) PutRaftAutopilotConfiguration(opts *AutopilotConfig) error {
-	r := c.c.NewRequest("POST", "/v1/sys/storage/raft/autopilot/configuration")
+	return c.PutRaftAutopilotConfigurationWithContext(context.Background(), opts)
+}
+
+// PutRaftAutopilotConfigurationWithContext allows modifying the raft autopilot configuration
+func (c *Sys) PutRaftAutopilotConfigurationWithContext(ctx context.Context, opts *AutopilotConfig) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/autopilot/configuration")
 	if err := r.SetJSONBody(opts); err != nil {
 		return err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
index 153e486c6..2ac8a4743 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_rekey.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
@@ -3,16 +3,22 @@ import (
 	"context"
 	"errors"
+	"net/http"
 
 	"github.com/mitchellh/mapstructure"
 )
 
 func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey/init")
+	return c.RekeyStatusWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/init")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -24,11 +30,16 @@ func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
 }
 
 func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init")
+	return c.RekeyRecoveryKeyStatusWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRecoveryKeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/init")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -40,11 +51,16 @@ func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
 }
 
 func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey/verify")
+	return c.RekeyVerificationStatusWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/verify")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -56,11 +72,16 @@ func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error
 }
 
 func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify")
+	return c.RekeyRecoveryKeyVerificationStatusWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRecoveryKeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/verify")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -72,14 +93,19 @@ func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResp
 }
 
 func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey/init")
+	return c.RekeyInitWithContext(context.Background(), config)
+}
+
+func (c *Sys) RekeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/init")
 	if err := r.SetJSONBody(config); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -91,14 +117,19 @@ func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error)
 }
 
 func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init")
+	return c.RekeyRecoveryKeyInitWithContext(context.Background(), config)
+}
+
+func (c *Sys) RekeyRecoveryKeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/init")
 	if err := r.SetJSONBody(config); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -110,11 +141,16 @@ func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusRespon
 }
 
 func (c *Sys) RekeyCancel() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init")
+	return c.RekeyCancelWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyCancelWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/init")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -122,11 +158,16 @@ func (c *Sys) RekeyCancel() error {
 }
 
 func (c *Sys) RekeyRecoveryKeyCancel() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init")
+	return c.RekeyRecoveryKeyCancelWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRecoveryKeyCancelWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/init")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -134,11 +175,16 @@ func (c *Sys) RekeyRecoveryKeyCancel() error {
 }
 
 func (c *Sys) RekeyVerificationCancel() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify")
+	return c.RekeyVerificationCancelWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyVerificationCancelWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/verify")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -146,11 +192,16 @@ func (c *Sys) RekeyVerificationCancel() error {
 }
 
 func (c *Sys) RekeyRecoveryKeyVerificationCancel() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify")
+	return c.RekeyRecoveryKeyVerificationCancelWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRecoveryKeyVerificationCancelWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/verify")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -158,19 +209,24 @@ func (c *Sys) RekeyRecoveryKeyVerificationCancel() error {
 }
 
 func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
+	return c.RekeyUpdateWithContext(context.Background(), shard, nonce)
+}
+
+func (c *Sys) RekeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
 	body := map[string]interface{}{
 		"key":   shard,
 		"nonce": nonce,
 	}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey/update")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/update")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -182,19 +238,24 @@ func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
 }
 
 func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
+	return c.RekeyRecoveryKeyUpdateWithContext(context.Background(), shard, nonce)
+}
+
+func (c *Sys) RekeyRecoveryKeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
 	body := map[string]interface{}{
 		"key":   shard,
 		"nonce": nonce,
 	}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/update")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -206,11 +267,16 @@ func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse,
 }
 
 func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey/backup")
+	return c.RekeyRetrieveBackupWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRetrieveBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/backup")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -234,11 +300,16 @@ func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
 }
 
 func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-key-backup")
+	return c.RekeyRetrieveRecoveryBackupWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyRetrieveRecoveryBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/recovery-key-backup")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -262,11 +333,16 @@ func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
 }
 
 func (c *Sys) RekeyDeleteBackup() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup")
+	return c.RekeyDeleteBackupWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyDeleteBackupWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/backup")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -275,11 +351,16 @@ func (c *Sys) RekeyDeleteBackup() error {
 }
 
 func (c *Sys) RekeyDeleteRecoveryBackup() error {
-	r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-key-backup")
+	return c.RekeyDeleteRecoveryBackupWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RekeyDeleteRecoveryBackupWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/recovery-key-backup")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -288,19 +369,24 @@ func (c *Sys) RekeyDeleteRecoveryBackup() error {
 }
 
 func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+	return c.RekeyVerificationUpdateWithContext(context.Background(), shard, nonce)
+}
+
+func (c *Sys) RekeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
 	body := map[string]interface{}{
 		"key":   shard,
 		"nonce": nonce,
 	}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/verify")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
@@ -312,19 +398,24 @@ func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUp
 }
 
 func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+	return c.RekeyRecoveryKeyVerificationUpdateWithContext(context.Background(), shard, nonce)
+}
+
+func (c *Sys) RekeyRecoveryKeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
 	body := map[string]interface{}{
 		"key":   shard,
 		"nonce": nonce,
 	}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/verify")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go
index e081587b1..fa86886c3 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_rotate.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go
@@ -4,15 +4,21 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"net/http"
 	"time"
 )
 
 func (c *Sys) Rotate() error {
-	r := c.c.NewRequest("POST", "/v1/sys/rotate")
+	return c.RotateWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) RotateWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodPost, "/v1/sys/rotate")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err == nil {
 		defer resp.Body.Close()
 	}
@@ -20,11 +26,16 @@
 }
 
 func (c *Sys) KeyStatus() (*KeyStatus, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/key-status")
+	return c.KeyStatusWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) KeyStatusWithContext(ctx context.Context) (*KeyStatus, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/key-status")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
index 20d41a28f..dcd8d32a5 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_seal.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -1,59 +1,87 @@
 package api
 
-import "context"
+import (
+	"context"
+	"net/http"
+)
 
 func (c *Sys) SealStatus() (*SealStatusResponse, error) {
-	r := c.c.NewRequest("GET", "/v1/sys/seal-status")
-	return sealStatusRequest(c, r)
+	return c.SealStatusWithContext(context.Background())
+}
+
+func (c *Sys) SealStatusWithContext(ctx context.Context) (*SealStatusResponse, error) {
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/seal-status")
+	return sealStatusRequestWithContext(ctx, c, r)
 }
 
 func (c *Sys) Seal() error {
-	r := c.c.NewRequest("PUT", "/v1/sys/seal")
+	return c.SealWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) SealWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
-	if err == nil {
-		defer resp.Body.Close()
+
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/seal")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return err
 	}
-	return err
+	defer resp.Body.Close()
+
+	return nil
 }
 
 func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) {
+	return c.ResetUnsealProcessWithContext(context.Background())
+}
+
+func (c *Sys) ResetUnsealProcessWithContext(ctx context.Context) (*SealStatusResponse, error) {
 	body := map[string]interface{}{"reset": true}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	return sealStatusRequest(c, r)
+	return sealStatusRequestWithContext(ctx, c, r)
 }
 
 func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) {
+	return c.UnsealWithContext(context.Background(), shard)
+}
+
+func (c *Sys) UnsealWithContext(ctx context.Context, shard string) (*SealStatusResponse, error) {
 	body := map[string]interface{}{"key": shard}
 
-	r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal")
 	if err := r.SetJSONBody(body); err != nil {
 		return nil, err
 	}
 
-	return sealStatusRequest(c, r)
+	return sealStatusRequestWithContext(ctx, c, r)
 }
 
 func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) {
-	r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+	return c.UnsealWithOptionsWithContext(context.Background(), opts)
+}
+
+func (c *Sys) UnsealWithOptionsWithContext(ctx context.Context, opts *UnsealOpts) (*SealStatusResponse, error) {
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal")
+
 	if err := r.SetJSONBody(opts); err != nil {
 		return nil, err
 	}
 
-	return sealStatusRequest(c, r)
+	return sealStatusRequestWithContext(ctx, c, r)
 }
 
-func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*SealStatusResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go
index 55dc6fbcb..833f31a6f 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go
@@ -1,13 +1,21 @@
 package api
 
-import "context"
+import (
+	"context"
+	"net/http"
+)
 
 func (c *Sys) StepDown() error {
-	r := c.c.NewRequest("PUT", "/v1/sys/step-down")
+	return c.StepDownWithContext(context.Background())
+}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
+func (c *Sys) StepDownWithContext(ctx context.Context) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
-	resp, err := c.c.RawRequestWithContext(ctx, r)
+
+	r := c.c.NewRequest(http.MethodPut, "/v1/sys/step-down")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
 	if resp != nil && resp.Body != nil {
 		resp.Body.Close()
 	}
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index e3b437985..4375bbc64 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,19 @@
+## 1.19.0
+
+## Features
+- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. (#523) [9fc2ae2] (#524) [c8ba582]
+- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714]
+- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f]
+
+## Fixes
+- update RELEASING instructions to match ginkgo [0917cde]
+- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0]
+- Fix CVE-2021-38561 (#534) [f1b4456]
+- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
+- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
+- Fix for Go 1.18 (#532) [56d2a29]
+- Document precedence of timeouts (#533) [b607941]
+
 ## 1.18.1
 
 ## Fixes
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
index 998d64ee7..2d30d9992 100644
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -7,6 +7,11 @@ A Gomega release is a tagged sha and a GitHub release.  To cut a release:
     - New Features (minor version)
     - Fixes (fix version)
     - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-2. Update GOMEGA_VERSION in `gomega_dsl.go`
-3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
-4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`).  List the key changes in the release notes.
+1. Update GOMEGA_VERSION in `gomega_dsl.go`
+1. Commit, push, and release:
+   ```
+   git commit -m "vM.m.p"
+   git push
+   gh release create "vM.m.p"
+   git fetch --tags origin master
+   ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 6936e2411..dcb7e8879 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const GOMEGA_VERSION = "1.18.1"
+const GOMEGA_VERSION = "1.19.0"
 
 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -52,7 +52,7 @@ var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle()))
 // rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures
 // or for use in a non-test setting.
 func NewGomega(fail types.GomegaFailHandler) Gomega {
-	return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail)
+	return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail)
 }
 
 // WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods.  This allows you to leverage
@@ -69,6 +69,20 @@ type WithT = internal.Gomega
 // GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
 type GomegaWithT = WithT
 
+// inner is an interface that allows users to provide a wrapper around Default. The wrapper
+// must implement the inner interface and return either the original Default or the result of
+// a call to NewGomega().
+type inner interface {
+	Inner() Gomega
+}
+
+func internalGomega(g Gomega) *internal.Gomega {
+	if v, ok := g.(inner); ok {
+		return v.Inner().(*internal.Gomega)
+	}
+	return g.(*internal.Gomega)
+}
+
 // NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
 // Gomega's rich ecosystem of matchers in standard `testing` test suites.
 //
@@ -79,7 +93,7 @@ type GomegaWithT = WithT
 //    g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
 // }
 func NewWithT(t types.GomegaTestingT) *WithT {
-	return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t)
+	return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t)
 }
 
 // NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
@@ -88,20 +102,20 @@ var NewGomegaWithT = NewWithT
 // RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
 // the fail handler passed into RegisterFailHandler is called.
 func RegisterFailHandler(fail types.GomegaFailHandler) {
-	Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+	internalGomega(Default).ConfigureWithFailHandler(fail)
 }
 
 // RegisterFailHandlerWithT is deprecated and will be removed in a future release.
 // users should use RegisterFailHandler, or RegisterTestingT
 func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) {
 	fmt.Println("RegisterFailHandlerWithT is deprecated.  Please use RegisterFailHandler or RegisterTestingT instead.")
-	Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+	internalGomega(Default).ConfigureWithFailHandler(fail)
 }
 
 // RegisterTestingT connects Gomega to Golang's XUnit style
 // Testing.T tests.  It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test.
 func RegisterTestingT(t types.GomegaTestingT) {
-	Default.(*internal.Gomega).ConfigureWithT(t)
+	internalGomega(Default).ConfigureWithT(t)
 }
 
 // InterceptGomegaFailures runs a given callback and returns an array of
@@ -112,13 +126,13 @@ func RegisterTestingT(t types.GomegaTestingT) {
 // This is most useful when testing custom matchers, but can also be used to check
 // on a value using a Gomega assertion without causing a test failure.
 func InterceptGomegaFailures(f func()) []string {
-	originalHandler := Default.(*internal.Gomega).Fail
+	originalHandler := internalGomega(Default).Fail
 	failures := []string{}
-	Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+	internalGomega(Default).Fail = func(message string, callerSkip ...int) {
 		failures = append(failures, message)
 	}
 	defer func() {
-		Default.(*internal.Gomega).Fail = originalHandler
+		internalGomega(Default).Fail = originalHandler
 	}()
 	f()
 	return failures
@@ -131,14 +145,14 @@ func InterceptGomegaFailures(f func()) []string {
 // does not register a failure with the FailHandler registered via RegisterFailHandler - it is up
 // to the user to decide what to do with the returned error
 func InterceptGomegaFailure(f func()) (err error) {
-	originalHandler := Default.(*internal.Gomega).Fail
-	Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+	originalHandler := internalGomega(Default).Fail
+	internalGomega(Default).Fail = func(message string, callerSkip ...int) {
 		err = errors.New(message)
 		panic("stop execution")
 	}
 
 	defer func() {
-		Default.(*internal.Gomega).Fail = originalHandler
+		internalGomega(Default).Fail = originalHandler
 		if e := recover(); e != nil {
 			if err == nil {
 				panic(e)
@@ -151,7 +165,7 @@
 }
 
 func ensureDefaultGomegaIsConfigured() {
-	if !Default.(*internal.Gomega).IsConfigured() {
+	if !internalGomega(Default).IsConfigured() {
 		panic(nilGomegaPanic)
 	}
 }
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index b46e461a5..b58dd67cb 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -256,16 +256,26 @@ func BeZero() types.GomegaMatcher {
 	return &matchers.BeZeroMatcher{}
 }
 
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
+//ContainElement succeeds if actual contains the passed in element. By default
+//ContainElement() uses Equal() to perform the match, however a matcher can be
+//passed in instead:
 //    Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
 //
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
+//Actual must be an array, slice or map. For maps, ContainElement searches
+//through the map's values.
+
+//If you want to have a copy of the matching element(s) found you can pass a
+//pointer to a variable of the appropriate type. If the variable isn't a slice
+//or map, then exactly one match will be expected and returned. If the variable
+//is a slice or map, then at least one match is expected and all matches will be
+//stored in the variable.
+//
+//    var findings []string
+//    Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"), &findings))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainElementMatcher{
 		Element: element,
+		Result:  result,
 	}
 }
 
@@ -320,6 +330,20 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher {
 	}
 }
 
+//HaveEach succeeds if actual solely contains elements that match the passed in element.
+//Please note that if actual is empty, HaveEach will fail, as the matcher requires a non-empty array, slice or map.
+//By default HaveEach() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map.
+//For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+	return &matchers.HaveEachMatcher{
+		Element: element,
+	}
+}
+
 //HaveKey succeeds if actual is a map with the passed in key.
 //By default HaveKey uses Equal() to perform the match, however a
 //matcher can be passed in instead:
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a..3d45c9ebc 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -3,6 +3,7 @@
 package matchers
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 
@@ -11,6 +12,7 @@
 type ContainElementMatcher struct {
 	Element interface{}
+	Result  []interface{}
 }
 
 func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
@@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
 		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map.  Got:\n%s", format.Object(actual, 1))
 	}
 
+	var actualT reflect.Type
+	var result reflect.Value
+	switch l := len(matcher.Result); {
+	case l > 1:
+		return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+	case l == 1:
+		if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+			return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at.  Got\n%s",
+				format.Object(matcher.Result[0], 1))
+		}
+		actualT = reflect.TypeOf(actual)
+		resultReference := matcher.Result[0]
+		result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+		switch result.Kind() {
+		case reflect.Array:
+			return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+				reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
+		case reflect.Slice:
+			if !isArrayOrSlice(actual) {
+				return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+					reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
+			}
+			if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+				return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+					actualT.String(), result.Type().String())
+			}
+		case reflect.Map:
+			if !isMap(actual) {
+				return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+					actualT.String(), result.Type().String())
+			}
+			if !actualT.AssignableTo(result.Type()) {
+				return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+					actualT.String(), result.Type().String())
+			}
+		default:
+			if !actualT.Elem().AssignableTo(result.Type()) {
+				return false, fmt.Errorf("ContainElement cannot return findings.  Need *%s, got *%s",
+					actualT.Elem().String(), result.Type().String())
+			}
+		}
+	}
+
 	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
 	if !elementIsMatcher {
 		elemMatcher = &EqualMatcher{Expected: matcher.Element}
@@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
 	value := reflect.ValueOf(actual)
 	var valueAt func(int) interface{}
+
+	var getFindings func() reflect.Value
+	var foundAt func(int)
+
 	if isMap(actual) {
 		keys := value.MapKeys()
 		valueAt = func(i int) interface{} {
 			return value.MapIndex(keys[i]).Interface()
 		}
+		if result.Kind() != reflect.Invalid {
+			fm := reflect.MakeMap(actualT)
+			getFindings = func() reflect.Value {
+				return fm
+			}
+			foundAt = func(i int) {
+				fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+			}
+		}
 	} else {
 		valueAt = func(i int) interface{} {
 			return value.Index(i).Interface()
 		}
+		if result.Kind() != reflect.Invalid {
+			var f reflect.Value
+			if result.Kind() == reflect.Slice {
+				f = reflect.MakeSlice(result.Type(), 0, 0)
+			} else {
+				f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+			}
+			getFindings = func() reflect.Value {
+				return f
+			}
+			foundAt = func(i int) {
+				f = reflect.Append(f, value.Index(i))
+			}
+		}
 	}
 
 	var lastError error
 	for i := 0; i < value.Len(); i++ {
-		success, err := elemMatcher.Match(valueAt(i))
+		elem := valueAt(i)
+		success, err := elemMatcher.Match(elem)
 		if err != nil {
 			lastError = err
 			continue
 		}
 		if success {
-			return true, nil
+			if result.Kind() == reflect.Invalid {
+				return true, nil
+			}
+			foundAt(i)
 		}
 	}
 
-	return false, lastError
+	// when the expectation isn't interested in the findings except for success
+	// or non-success, then we're done here and return the last matcher error
+	// seen, if any, as well as non-success.
+	if result.Kind() == reflect.Invalid {
+		return false, lastError
+	}
+
+	// pick up any findings the test is interested in as it specified a non-nil
+	// result reference. However, the expectation always is that there is at
+	// least one finding. So, if a result is expected, but we had no findings,
+	// then this is an error.
+	findings := getFindings()
+	if findings.Len() == 0 {
+		return false, lastError
+	}
+
+	// there's just a single finding and the result is neither a slice nor a map
+	// (so it's a scalar): pick the one and only finding and return it in the
+	// place the reference points to.
+	if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+		if isMap(actual) {
+			miter := findings.MapRange()
+			miter.Next()
+			result.Set(miter.Value())
+		} else {
+			result.Set(findings.Index(0))
+		}
+		return true, nil
+	}
+
+	// at least one or even multiple findings and the result references a
+	// slice or a map, so all we need to do is to store our findings where the
+	// reference points to.
+	if !findings.Type().AssignableTo(result.Type()) {
+		return false, fmt.Errorf("ContainElement cannot return multiple findings.  Need *%s, got *%s",
+			findings.Type().String(), result.Type().String())
+	}
+	result.Set(findings)
+	return true, nil
 }
 
 func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 000000000..025b6e1ac
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+	Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isArrayOrSlice(actual) && !isMap(actual) {
+		return false, fmt.Errorf("HaveEach matcher expects an array/slice/map.  Got:\n%s",
+			format.Object(actual, 1))
+	}
+
+	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+	if !elementIsMatcher {
+		elemMatcher = &EqualMatcher{Expected: matcher.Element}
+	}
+
+	value := reflect.ValueOf(actual)
+	if value.Len() == 0 {
+		return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map.  Got:\n%s",
+			format.Object(actual, 1))
+	}
+
+	var valueAt func(int) interface{}
+	if isMap(actual) {
+		keys := value.MapKeys()
+		valueAt = func(i int) interface{} {
+			return value.MapIndex(keys[i]).Interface()
+		}
+	} else {
+		valueAt = func(i int) interface{} {
+			return value.Index(i).Interface()
+		}
+	}
+
+	// every element must match; empty collections were already rejected above.
+	for i := 0; i < value.Len(); i++ {
+		success, err := elemMatcher.Match(valueAt(i))
+		if err != nil {
+			return false, err
+		}
+		if !success {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 41649d267..3bb22a971 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -3,6 +3,7 @@ package assert
 import (
 	"fmt"
 	"reflect"
+	"time"
 )
 
 type CompareType int
@@ -30,6 +31,8 @@ var (
 	float64Type = reflect.TypeOf(float64(1))
 
 	stringType = reflect.TypeOf("")
+
+	timeType = reflect.TypeOf(time.Time{})
 )
 
 func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			return compareLess, true
 		}
 	}
+	// Check for known struct types we can check for compare results.
+	case reflect.Struct:
+		{
+			// All structs enter here. We're not interested in most types.
+			if !canConvert(obj1Value, timeType) {
+				break
+			}
+
+			// time.Time can be compared!
+			timeObj1, ok := obj1.(time.Time)
+			if !ok {
+				timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			timeObj2, ok := obj2.(time.Time)
+			if !ok {
+				timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+		}
 	}
 
 	return compareEqual, false
@@ -310,7 +334,10 @@
 //    assert.Greater(t, float64(2), float64(1))
 //    assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +347,10 @@
 //    assert.GreaterOrEqual(t, "b", "a")
 //    assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // Less asserts that the first element is less than the second
@@ -329,7 +359,10 @@
 //    assert.Less(t, float64(1), float64(2))
 //    assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +372,10 @@
 //    assert.LessOrEqual(t, "a", "b")
 //    assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
 
 // Positive asserts that the specified element is positive
@@ -347,8 +383,11 @@
 //    assert.Positive(t, 1)
 //    assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
 }
 
 // Negative asserts that the specified element is negative
@@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 //    assert.Negative(t, -1)
 //    assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
 }
 
 func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 000000000..df22c47fc
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 000000000..1701af2a3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go do not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229a..27e2420ed 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
 	return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
 }
 
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
 // ErrorIsf asserts that at least one of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 25337a6f0..d9ea368d0 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
 	return ErrorAsf(a.t, err, target, msg, args...)
 }
 
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
 // ErrorIs asserts that at least one of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1c3b47182..759448783 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
 //    assert.IsIncreasing(t, []float{1, 2})
 //    assert.IsIncreasing(t, []string{"a", "b"})
 func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
 //    assert.IsNonIncreasing(t, []float{2, 1})
 //    assert.IsNonIncreasing(t, []string{"b", "a"})
 func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
 //    assert.IsDecreasing(t, []float{2, 1})
 //    assert.IsDecreasing(t, []string{"b", "a"})
 func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
 //    assert.IsNonDecreasing(t, []float{1, 2})
 //    assert.IsNonDecreasing(t, []string{"a", "b"})
 func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bcac4401f..0357b2231 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
 // return (false, false) if impossible.
 // return (true, false) if element was not found.
 // return (true, true) if element was found.
-func includeElement(list interface{}, element interface{}) (ok, found bool) {
+func containsElement(list interface{}, element interface{}) (ok, found bool) {
 	listValue := reflect.ValueOf(list)
-	listKind := reflect.TypeOf(list).Kind()
+	listType := reflect.TypeOf(list)
+	if listType == nil {
+		return false, false
+	}
+	listKind := listType.Kind()
 	defer func() {
 		if e := recover(); e != nil {
 			ok = false
@@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
 		h.Helper()
 	}
 
-	ok, found := includeElement(s, contains)
+	ok, found := containsElement(s, contains)
 	if !ok {
 		return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
 	}
@@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
 		h.Helper()
 	}
 
-	ok, found := includeElement(s, contains)
+	ok, found := containsElement(s, contains)
 	if !ok {
 		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
 	}
@@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
 	for i := 0; i < subsetValue.Len(); i++ {
 		element := subsetValue.Index(i).Interface()
-		ok, found := includeElement(list, element)
+		ok, found := containsElement(list, element)
 		if !ok {
 			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
 		}
@@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
 		h.Helper()
 	}
 	if subset == nil {
-		return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+		return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
 	}
 
 	subsetValue := reflect.ValueOf(subset)
@@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
 	for i := 0; i < subsetValue.Len(); i++ {
 		element := subsetValue.Index(i).Interface()
-		ok, found := includeElement(list, element)
+		ok, found := containsElement(list, element)
 		if !ok {
 			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
 		}
@@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
 type PanicTestFunc func()
 
 // didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}, string) {
-
-	didPanic := false
-	var message interface{}
-	var stack string
-	func() {
-
-		defer func() {
-			if message = recover(); message != nil {
-				didPanic = true
-				stack = string(debug.Stack())
-			}
-		}()
-
-		// call the target function
-		f()
+func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
+	didPanic = true
 
+	defer func() {
+		message = recover()
+		if didPanic {
+			stack = string(debug.Stack())
+		}
 	}()
 
-	return didPanic, message, stack
+	// call the target function
+	f()
+	didPanic = false
+
+	return
 }
 
 // Panics asserts that the code inside the specified PanicTestFunc panics.
@@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
 	bf, bok := toFloat(actual)
 
 	if !aok || !bok {
-		return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+		return Fail(t, "Parameters must be numerical", msgAndArgs...)
+	}
+
+	if math.IsNaN(af) && math.IsNaN(bf) {
+		return true
 	}
 
 	if math.IsNaN(af) {
-		return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+		return Fail(t, "Expected must not be NaN", msgAndArgs...)
 	}
 
 	if math.IsNaN(bf) {
@@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
 	if expected == nil || actual == nil ||
 		reflect.TypeOf(actual).Kind() != reflect.Slice ||
 		reflect.TypeOf(expected).Kind() != reflect.Slice {
-		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+		return Fail(t, "Parameters must be slice", msgAndArgs...)
 	}
 
 	actualSlice := reflect.ValueOf(actual)
@@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
 func calcRelativeError(expected, actual interface{}) (float64, error) {
 	af, aok := toFloat(expected)
-	if !aok {
-		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+	bf, bok := toFloat(actual)
+	if !aok || !bok {
+		return 0, fmt.Errorf("Parameters must be numerical")
+	}
+	if math.IsNaN(af) && math.IsNaN(bf) {
+		return 0, nil
 	}
 	if math.IsNaN(af) {
 		return 0, errors.New("expected value must not be NaN")
@@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
 	if af == 0 {
 		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
 	}
-	bf, bok := toFloat(actual)
-	if !bok {
-		return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
-	}
 	if math.IsNaN(bf) {
 		return 0, errors.New("actual value must not be NaN")
 	}
@@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
 	if expected == nil || actual == nil ||
 		reflect.TypeOf(actual).Kind() != reflect.Slice ||
 		reflect.TypeOf(expected).Kind() != reflect.Slice {
-		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+		return Fail(t, "Parameters must be slice", msgAndArgs...)
 	}
 
 	actualSlice := reflect.ValueOf(actual)
@@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
 	return true
 }
 
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !Error(t, theError, msgAndArgs...) {
+		return false
+	}
+
+	actual := theError.Error()
+	if !strings.Contains(actual, contains) {
+		return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
+	}
+
+	return true
+}
+
 // matchRegexp return true if a specified regexp matches a string.
 func matchRegexp(rx interface{}, str interface{}) bool {
@@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string {
 	}
 
 	var e, a string
-	if et != reflect.TypeOf("") {
-		e = spewConfig.Sdump(expected)
-		a = spewConfig.Sdump(actual)
-	} else {
+
+	switch et {
+	case reflect.TypeOf(""):
 		e = reflect.ValueOf(expected).String()
 		a = reflect.ValueOf(actual).String()
+	case reflect.TypeOf(time.Time{}):
+		e = spewConfigStringerEnabled.Sdump(expected)
+		a = spewConfigStringerEnabled.Sdump(actual)
+	default:
+		e = spewConfig.Sdump(expected)
+		a = spewConfig.Sdump(actual)
 	}
 
 	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{
 	MaxDepth:                10,
 }
 
+var spewConfigStringerEnabled = spew.ConfigState{
+	Indent:                  " ",
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+	SortKeys:                true,
+	MaxDepth:                10,
+}
+
 type tHelper interface {
 	Helper()
 }
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 51820df2e..59c48277a 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
 	t.FailNow()
 }
 
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+//   actualObj, err := SomeFunction()
+//   assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.ErrorContainsf(t, theError, contains, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
 // ErrorIs asserts that at least one of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83..5bb07c89c 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 000000000..aca4b2b31 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 000000000..eab532c96 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f135b0f75..4f0989763 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -735,7 +735,6 @@ func (cc *ClientConn) healthCheck() { err := cc.Ping(ctx) if err != nil { cc.closeForLostPing() - cc.t.connPool().MarkDead(cc) return } } @@ -907,6 +906,24 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } +func (cc *ClientConn) closeConn() error { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + return cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. 
+func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tlsUnderlyingConn(tc); nc != nil { + nc.Close() + } +} + func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 || cc.streamsReserved > 0 { @@ -921,7 +938,7 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.tconn.Close() + cc.closeConn() } func (cc *ClientConn) isDoNotReuseAndIdle() bool { @@ -938,7 +955,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) + done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -946,7 +963,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - done <- cc.tconn.Close() + close(done) break } if cancelled { @@ -957,8 +974,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case err := <-done: - return err + case <-done: + return cc.closeConn() case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -1001,9 +1018,9 @@ func (cc *ClientConn) closeForError(err error) error { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - return cc.tconn.Close() + cc.cond.Broadcast() + cc.mu.Unlock() + return cc.closeConn() } // Close closes the client connection immediately. @@ -1978,7 +1995,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) } cc.closed = true - defer cc.tconn.Close() + defer cc.closeConn() } cc.mu.Unlock() @@ -2025,8 +2042,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() defer close(cc.readerDone) if cc.idleTimer != nil { diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..52338d004 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc87..f9af78913 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. 
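Aside: the ClientConn.closeConn/forceCloseConn pair vendored above bounds a potentially hanging tls.Conn.Close with a 250ms watchdog that closes the underlying net.Conn. The same shape, sketched generically over io.Closer (closeWithDeadline and its parameters are illustrative names, not part of the vendored API):

package example

import (
	"io"
	"time"
)

// closeWithDeadline closes primary, and if Close has not returned within
// grace it force-closes fallback (e.g. the raw TCP conn beneath a TLS conn)
// to unblock it.
func closeWithDeadline(primary, fallback io.Closer, grace time.Duration) error {
	t := time.AfterFunc(grace, func() {
		// Best effort: the error is ignored; we only want to unblock primary.Close.
		_ = fallback.Close()
	})
	defer t.Stop()
	return primary.Close()
}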
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4fbed1256..82bee1443 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -70,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns a nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf..bb96ef57b 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543..ea660a147 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,7 @@ package channelz import ( + "context" "fmt" "sort" "sync" @@ -49,7 +50,8 @@ var ( // TurnOn turns on channelz data collection.
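Aside: the reworded errNoTransportSecurity message above points callers at the replacement for the deprecated grpc.WithInsecure. A sketch of a plaintext dial with the new credentials (the target address is a placeholder):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialPlaintext() (*grpc.ClientConn, error) {
	// insecure.NewCredentials() disables transport security explicitly,
	// replacing the deprecated grpc.WithInsecure() dial option.
	return grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
}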
func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +96,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -326,6 +322,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9bad03cec..7d996e51b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -82,7 +83,10 @@ var ( // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
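Aside: NewChannelzStorageForTesting above swaps a fixed 1000-iteration sleep loop for context-deadline-plus-ticker polling. The same wait-until-empty shape in isolation (waitUntilEmpty and count are illustrative names):

package example

import (
	"context"
	"fmt"
	"time"
)

// waitUntilEmpty polls count every 10ms until it reports zero entries or a
// 10s deadline expires, mirroring the vendored channelz cleanup function.
func waitUntilEmpty(count func() int) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		n := count()
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("not cleaned up after 10s, %d entries left", n)
		}
		if n == 0 {
			return nil
		}
		<-ticker.C
	}
}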
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf35..20fb880f3 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2b..0c43efaa6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -741,6 +741,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 82a5ba7f2..81344abd7 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,21 +37,17 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -59,339 +55,174 @@ import ( // as a registry, for accumulating the services exposed by the server. 
type GRPCServer interface { grpc.ServiceRegistrar - GetServiceInfo() map[string]grpc.ServiceInfo + ServiceInfoProvider } var _ GRPCServer = (*grpc.Server)(nil) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s GRPCServer - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). -type protoMessage interface { - Descriptor() ([]byte, []int) +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. 
If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver +} - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } } -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} - -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd -} - -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name -} - -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. 
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil -} - -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - return out, nil -} - -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil -} - -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) -} - -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. 
-func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) - } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - fd, err := fileDescContainingExtension(st, extNum) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. 
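Aside: after the rewrite above, reflection.Register is a thin wrapper over the new NewServer/ServerOptions entry points, and descriptors come from protoregistry rather than the legacy proto registry. A sketch of registering reflection explicitly through the experimental API (assuming an existing *grpc.Server):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func registerReflection(s *grpc.Server) {
	// Equivalent to reflection.Register(s); spelling it out via ServerOptions
	// is what allows custom descriptor or extension resolvers to be plugged in.
	svr := reflection.NewServer(reflection.ServerOptions{Services: s})
	rpb.RegisterServerReflectionServer(s, svr)
}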
@@ -412,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -473,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 58c802f8a..978b89f37 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -117,9 +117,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05f..b24b6d539 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1283,9 +1283,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1549,7 +1550,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf..6926a06dc 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34..8cdd652e0 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -46,10 +46,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 9d3fd73da..5bd4f534c 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.44.1-dev" +const Version = "1.45.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7..ceb436c6c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..9c61112f5 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..37803773f 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
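Aside: ConsumeFieldValue above now carries a recursion budget for nested groups and returns errCodeRecursionDepth once DefaultRecursionLimit (10000) levels are exceeded. A sketch of a caller skipping one unknown field, where any negative return is surfaced via protowire.ParseError:

package example

import "google.golang.org/protobuf/encoding/protowire"

// skipField consumes a single field from raw wire data and returns the rest.
func skipField(b []byte) ([]byte, error) {
	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	b = b[n:]
	// For StartGroupType this walks nested groups, failing with a recursion
	// error instead of recursing without bound on hostile input.
	n = protowire.ConsumeFieldValue(num, typ, b)
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	return b[n:], nil
}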
+//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3ae..145c577bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a6..c65b0325c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -18,6 +18,7 @@ import ( ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +26,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -44,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool { var lazyUnmarshalOptions = unmarshalOptions{ resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -62,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) var flags piface.UnmarshalOutputFlags if out.initialized { @@ -82,6 +86,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821e..4c491bdf4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85..ee0e0573e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019..56a8a4ed3 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2..3d40d5249 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,8 +52,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 - Patch = 1 + Minor = 28 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88..11bf7173b 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -42,18 +42,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. 
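proto/decode.go exposes that budget publicly as `UnmarshalOptions.RecursionLimit`, with zero meaning `protowire.DefaultRecursionLimit`. A hedged usage sketch (the wire bytes are a placeholder):

```go
package main

import (
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	var v structpb.Value // self-referential type, so it can nest arbitrarily deep
	data := []byte{}     // placeholder: wire-format bytes from an untrusted peer

	// Cap nesting well below the default; leaving the field zero applies
	// protowire.DefaultRecursionLimit instead.
	opts := proto.UnmarshalOptions{RecursionLimit: 64}
	if err := opts.Unmarshal(data, &v); err != nil {
		log.Printf("rejected: %v", err) // deep inputs fail instead of growing the stack
	}
}
```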
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6..465e057b3 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d4320..494d6ceef 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9..d5d5af6eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1..7ced876f4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724..eb7764c30 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,31 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. 
+// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdca..702ddf22a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67e..44cf467d8 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 133ba0493..8a2824b51 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -144,7 +144,6 @@ message PodDisruptionBudgetSpec { // A null selector selects no pods. // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. - // +patchStrategy=replace // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 553cb316d..486f93461 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -36,9 +36,8 @@ type PodDisruptionBudgetSpec struct { // A null selector selects no pods. // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. 
- // +patchStrategy=replace // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" patchStrategy:"replace" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index ad5063fdf..8cccebf2e 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,19 +1,13 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - pohly - - yagonobre - - vincepri - - detiber approvers: - dims - thockin - - justinsb - - tallclair - - piosz - - brancz - - lavalamp - serathius +emeritus_approvers: + - brancz + - justinsb + - lavalamp + - piosz + - tallclair diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 64d29622e..7de2212cc 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -23,6 +23,21 @@ Historical context is available here: * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ +## Release versioning + +Semantic versioning is used in this repository. It contains several Go modules +with different levels of stability: +- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags +- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags +- `examples` - no stable API, no tags, no intention to ever stabilize + +Exempt from the API stability guarantee are items (packages, functions, etc.) +which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those +may still change in incompatible ways or get removed entirely. This can only +be used for code that is used in tests to avoid situations where non-test +code from two different Kubernetes dependencies depends on incompatible +releases of klog because an experimental API was changed. + ---- How to use klog @@ -32,6 +47,7 @@ How to use klog - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. @@ -85,7 +101,7 @@ The comment from glog.go introduces the ideas: glog.Fatalf("Initialization failed: %s", err) - See the documentation for the V function for an explanation + See the documentation of the V function for an explanation of these examples: if glog.V(2) { diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 000000000..0bf19280e --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,197 @@ +/* +Copyright 2021 The Kubernetes Authors. 
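The klog README above points at `klog.SetOutput()` for redirecting all output to an `io.Writer`. A small sketch following the pattern from klog's examples/set_output; note that `logtostderr` (on by default) has to be disabled, or the redirect is bypassed:

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "false") // otherwise stderr wins over SetOutput
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf)
	klog.Info("hello world")
	klog.Flush()
	fmt.Print(buf.String()) // the formatted log line, including its header
}
```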
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. +// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled = true + + // globalLogger is the global Logger chosen by users of klog, nil if + // none is available. + globalLogger *Logger + + // globalLoggerOptions contains the options that were supplied for + // globalLogger. + globalLoggerOptions loggerOptions + + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. + klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. +// +// If set, all log lines will be suppressed from the regular output, and +// redirected to the logr implementation. +// Use as: +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogger(logger logr.Logger) { + SetLoggerWithOptions(logger) +} + +// SetLoggerWithOptions is a more flexible version of SetLogger. Without +// additional options, it behaves exactly like SetLogger. By passing +// ContextualLogger(true) as option, it can be used to set a logger that then +// will also get called directly by applications which retrieve it via +// FromContext, Background, or TODO. +// +// Supporting direct calls is recommended because it avoids the overhead of +// routing log entries through klogr into klog and then into the actual Logger +// backend. +func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { + globalLogger = &logger + globalLoggerOptions = loggerOptions{} + for _, opt := range opts { + opt(&globalLoggerOptions) + } +} + +// ContextualLogger determines whether the logger passed to +// SetLoggerWithOptions may also get called directly. Such a logger cannot rely +// on verbosity checking in klog. 
+func ContextualLogger(enabled bool) LoggerOption { + return func(o *loggerOptions) { + o.contextualLogger = enabled + } +} + +// FlushLogger provides a callback for flushing data buffered by the logger. +func FlushLogger(flush func()) LoggerOption { + return func(o *loggerOptions) { + o.flush = flush + } +} + +// LoggerOption implements the functional parameter paradigm for +// SetLoggerWithOptions. +type LoggerOption func(o *loggerOptions) + +type loggerOptions struct { + contextualLogger bool + flush func() +} + +// ClearLogger removes a backing Logger implementation if one was set earlier +// with SetLogger. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func ClearLogger() { + globalLogger = nil + globalLoggerOptions = loggerOptions{} +} + +// EnableContextualLogging controls whether contextual logging is enabled. +// By default it is enabled. When disabled, FromContext avoids looking up +// the logger in the context and always returns the global logger. +// LoggerWithValues, LoggerWithName, and NewContext become no-ops +// and return their input logger respectively context. This may be useful +// to avoid the additional overhead for contextual logging. +// +// This must be called during initialization before goroutines are started. +func EnableContextualLogging(enabled bool) { + contextualLoggingEnabled = enabled +} + +// FromContext retrieves a logger set by the caller or, if not set, +// falls back to the program's global logger (a Logger instance or klog +// itself). +func FromContext(ctx context.Context) Logger { + if contextualLoggingEnabled { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + } + + return Background() +} + +// TODO can be used as a last resort by code that has no means of +// receiving a logger from its caller. FromContext or an explicit logger +// parameter should be used instead. +func TODO() Logger { + return Background() +} + +// Background retrieves the fallback logger. It should not be called before +// that logger was initialized by the program and not by code that should +// better receive a logger via its parameters. TODO can be used as a temporary +// solution for such code. +func Background() Logger { + if globalLoggerOptions.contextualLogger { + // Is non-nil because globalLoggerOptions.contextualLogger is + // only true if a logger was set. + return *globalLogger + } + + return klogLogger +} + +// LoggerWithValues returns logger.WithValues(...kv) when +// contextual logging is enabled, otherwise the logger. +func LoggerWithValues(logger Logger, kv ...interface{}) Logger { + if contextualLoggingEnabled { + return logger.WithValues(kv...) + } + return logger +} + +// LoggerWithName returns logger.WithName(name) when contextual logging is +// enabled, otherwise the logger. +func LoggerWithName(logger Logger, name string) Logger { + if contextualLoggingEnabled { + return logger.WithName(name) + } + return logger +} + +// NewContext returns logr.NewContext(ctx, logger) when +// contextual logging is enabled, otherwise ctx. 
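`Background`, `FromContext`, `LoggerWithName`, and `NewContext`, all defined in this file, form the contextual-logging round trip. A short sketch of the intended caller/callee split:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

func process(ctx context.Context) {
	// Callee: pick up whatever logger the caller attached; falls back to
	// the global logger when none is set or the feature is disabled.
	logger := klog.FromContext(ctx)
	logger.Info("processing item", "item", 42)
}

func main() {
	// Caller: derive a named logger and hand it down through the context.
	logger := klog.LoggerWithName(klog.Background(), "worker")
	process(klog.NewContext(context.Background(), logger))
}
```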
+func NewContext(ctx context.Context, logger Logger) context.Context { + if contextualLoggingEnabled { + return logr.NewContext(ctx, logger) + } + return ctx +} diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go new file mode 100644 index 000000000..320a14772 --- /dev/null +++ b/vendor/k8s.io/klog/v2/exit.go @@ -0,0 +1,69 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package klog + +import ( + "fmt" + "os" + "time" +) + +var ( + + // ExitFlushTimeout is the timeout that klog has traditionally used during + // calls like Fatal or Exit when flushing log data right before exiting. + // Applications that replace those calls and do not have some specific + // requirements like "exit immediately" can use this value as parameter + // for FlushAndExit. + // + // Can be set for testing purpose or to change the application's + // default. + ExitFlushTimeout = 10 * time.Second + + // OsExit is the function called by FlushAndExit to terminate the program. + // + // Can be set for testing purpose or to change the application's + // default behavior. Note that the function should not simply return + // because callers of functions like Fatal will not expect that. + OsExit = os.Exit +) + +// FlushAndExit flushes log data for a certain amount of time and then calls +// os.Exit. Combined with some logging call it provides a replacement for +// traditional calls like Fatal or Exit. +func FlushAndExit(flushTimeout time.Duration, exitCode int) { + timeoutFlush(flushTimeout) + OsExit(exitCode) +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. Flushing also might take too long. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go new file mode 100644 index 000000000..602c3ed9e --- /dev/null +++ b/vendor/k8s.io/klog/v2/imports.go @@ -0,0 +1,38 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
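exit.go above is the building block for replacing `Fatal`-style calls: log first, then flush with a bounded timeout and terminate. A sketch of the substitution:

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func run() error { return errors.New("placeholder failure") }

func main() {
	if err := run(); err != nil {
		// Instead of klog.Fatal: log structured, then flush buffered log
		// data (bounded by ExitFlushTimeout) and exit via the overridable
		// OsExit hook.
		klog.ErrorS(err, "unrecoverable error")
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}
}
```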
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" +) + +// The reason for providing these aliases is to allow code to work with logr +// without directly importing it. + +// Logger in this package is exactly the same as logr.Logger. +type Logger = logr.Logger + +// LogSink in this package is exactly the same as logr.LogSink. +type LogSink = logr.LogSink + +// Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. +type RuntimeInfo = logr.RuntimeInfo + +var ( + // New is an alias for logr.New. + New = logr.New +) diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go new file mode 100644 index 000000000..ac88682a2 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -0,0 +1,159 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buffer provides a cache for byte.Buffer instances that can be reused +// to avoid frequent allocation and deallocation. It also has utility code +// for log header formatting that use these buffers. +package buffer + +import ( + "bytes" + "os" + "sync" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +var ( + // Pid is inserted into log headers. Can be overridden for tests. + Pid = os.Getpid() +) + +// Buffer holds a single byte.Buffer for reuse. The zero value is ready for +// use. It also provides some helper methods for output formatting. +type Buffer struct { + bytes.Buffer + Tmp [64]byte // temporary byte array for creating headers. + next *Buffer +} + +// Buffers manages the reuse of individual buffer instances. It is thread-safe. +type Buffers struct { + // mu protects the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + mu sync.Mutex + + // freeList is a list of byte buffers, maintained under mu. + freeList *Buffer +} + +// GetBuffer returns a new, ready-to-use buffer. +func (bl *Buffers) GetBuffer() *Buffer { + bl.mu.Lock() + b := bl.freeList + if b != nil { + bl.freeList = b.next + } + bl.mu.Unlock() + if b == nil { + b = new(Buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// PutBuffer returns a buffer to the free list. +func (bl *Buffers) PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + bl.mu.Lock() + b.next = bl.freeList + bl.freeList = b + bl.mu.Unlock() +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i]. +func (buf *Buffer) twoDigits(i, d int) { + buf.Tmp[i+1] = digits[d%10] + d /= 10 + buf.Tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.Tmp[i], +// padding with pad on the left. +// It assumes d >= 0. 
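`GetBuffer` and `PutBuffer` above implement a simple mutex-protected free list. Since `internal/buffer` is only importable from inside the klog module, the following is purely illustrative of how klog itself consumes it:

```go
package klog // sketch: internal/buffer is not importable outside the klog module

import (
	"k8s.io/klog/v2/internal/buffer"
)

var bufferCache buffer.Buffers // zero value is ready for use

func formatLine(msg string) []byte {
	buf := bufferCache.GetBuffer()   // reuses a pooled buffer when one is free
	defer bufferCache.PutBuffer(buf) // returned unless it grew past 256 bytes
	buf.WriteString(msg)
	buf.WriteByte('\n')
	return append([]byte(nil), buf.Bytes()...) // copy out before releasing
}
```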
+func (buf *Buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.Tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.Tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i]. +func (buf *Buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.Tmp) + for { + j-- + buf.Tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.Tmp[i:], buf.Tmp[j:]) +} + +// FormatHeader formats a log header using the provided file name and line number. +func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ' ' + buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID + buf.Tmp[29] = ' ' + buf.Write(buf.Tmp[:30]) + buf.WriteString(file) + buf.Tmp[0] = ':' + n := buf.someDigits(1, line) + buf.Tmp[n+1] = ']' + buf.Tmp[n+2] = ' ' + buf.Write(buf.Tmp[:n+3]) +} diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md new file mode 100644 index 000000000..03d692c8f --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/README.md @@ -0,0 +1,7 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. + +This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular +dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog). diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go new file mode 100644 index 000000000..b8b6af5c8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. 
+ // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. 
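The interfaces above exist so that time can be injected as a dependency: production code passes `clock.RealClock{}`, tests can pass a fake. A sketch of the pattern (again only meaningful inside the klog module, because `internal/clock` is internal):

```go
package klog // sketch: internal/clock is not importable outside the klog module

import (
	"time"

	"k8s.io/klog/v2/internal/clock"
)

// runEvery takes the interface rather than the time package directly, so
// callers control the clock. Tests can substitute a fake WithTicker.
func runEvery(c clock.WithTicker, interval time.Duration, work func()) {
	t := c.NewTicker(interval)
	defer t.Stop()
	for range t.C() {
		work()
	}
}

// Production call site would look like:
//   runEvery(clock.RealClock{}, 5*time.Second, flushAll)
```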
+type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go new file mode 100644 index 000000000..d89731368 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -0,0 +1,225 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "strconv" +) + +// WithValues implements LogSink.WithValues. The old key/value pairs are +// assumed to be well-formed, the new ones are checked and padded if +// necessary. It returns a new slice. +func WithValues(oldKV, newKV []interface{}) []interface{} { + if len(newKV) == 0 { + return oldKV + } + newLen := len(oldKV) + len(newKV) + hasMissingValue := newLen%2 != 0 + if hasMissingValue { + newLen++ + } + // The new LogSink must have its own slice. + kv := make([]interface{}, 0, newLen) + kv = append(kv, oldKV...) + kv = append(kv, newKV...) + if hasMissingValue { + kv = append(kv, missingValue) + } + return kv +} + +// TrimDuplicates deduplicates elements provided in multiple key/value tuple +// slices, whilst maintaining the distinction between where the items are +// contained. +func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { + // maintain a map of all seen keys + seenKeys := map[interface{}]struct{}{} + // build the same number of output slices as inputs + outs := make([][]interface{}, len(kvLists)) + // iterate over the input slices backwards, as 'later' kv specifications + // of the same key will take precedence over earlier ones + for i := len(kvLists) - 1; i >= 0; i-- { + // initialise this output slice + outs[i] = []interface{}{} + // obtain a reference to the kvList we are processing + // and make sure it has an even number of entries + kvList := kvLists[i] + if len(kvList)%2 != 0 { + kvList = append(kvList, missingValue) + } + + // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for + // slices that have an even number of elements. + // We add (len(kvList) % 2) here to handle the case where there is an + // odd number of elements in a kvList. + // If there is an odd number, then the last element in the slice will + // have the value 'null'. 
+		for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 {
+			k := kvList[i2]
+			// if we have already seen this key, do not include it again
+			if _, ok := seenKeys[k]; ok {
+				continue
+			}
+			// make a note that we've observed a new key
+			seenKeys[k] = struct{}{}
+			// attempt to obtain the value of the key
+			var v interface{}
+			// i2+1 should only ever be out of bounds if we handling the first
+			// iteration over a slice with an odd number of elements
+			if i2+1 < len(kvList) {
+				v = kvList[i2+1]
+			}
+			// add this KV tuple to the *start* of the output list to maintain
+			// the original order as we are iterating over the slice backwards
+			outs[i] = append([]interface{}{k, v}, outs[i]...)
+		}
+	}
+	return outs
+}
+
+const missingValue = "(MISSING)"
+
+// KVListFormat serializes all key/value pairs into the provided buffer.
+// A space gets inserted before the first pair and between each pair.
+func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+	for i := 0; i < len(keysAndValues); i += 2 {
+		var v interface{}
+		k := keysAndValues[i]
+		if i+1 < len(keysAndValues) {
+			v = keysAndValues[i+1]
+		} else {
+			v = missingValue
+		}
+		b.WriteByte(' ')
+		// Keys are assumed to be well-formed according to
+		// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+		// for the sake of performance. Keys with spaces,
+		// special characters, etc. will break parsing.
+		if k, ok := k.(string); ok {
+			// Avoid one allocation when the key is a string, which
+			// normally it should be.
+			b.WriteString(k)
+		} else {
+			b.WriteString(fmt.Sprintf("%s", k))
+		}
+
+		// The type checks are sorted so that more frequently used ones
+		// come first because that is then faster in the common
+		// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+		// than plain strings
+		// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+		switch v := v.(type) {
+		case fmt.Stringer:
+			writeStringValue(b, true, StringerToString(v))
+		case string:
+			writeStringValue(b, true, v)
+		case error:
+			writeStringValue(b, true, ErrorToString(v))
+		case []byte:
+			// In https://github.com/kubernetes/klog/pull/237 it was decided
+			// to format byte slices with "%+q". The advantages of that are:
+			// - readable output if the bytes happen to be printable
+			// - non-printable bytes get represented as unicode escape
+			//   sequences (\uxxxx)
+			//
+			// The downsides are that we cannot use the faster
+			// strconv.Quote here and that multi-line output is not
+			// supported. If developers know that a byte array is
+			// printable and they want multi-line output, they can
+			// convert the value to string before logging it.
+			b.WriteByte('=')
+			b.WriteString(fmt.Sprintf("%+q", v))
+		default:
+			writeStringValue(b, false, fmt.Sprintf("%+v", v))
+		}
+	}
+}
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
+func StringerToString(s fmt.Stringer) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = s.String()
+	return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur.
+func ErrorToString(err error) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = err.Error()
+	return
+}
+
+func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+	data := []byte(v)
+	index := bytes.IndexByte(data, '\n')
+	if index == -1 {
+		b.WriteByte('=')
+		if quote {
+			// Simple string, quote quotation marks and non-printable characters.
+			b.WriteString(strconv.Quote(v))
+			return
+		}
+		// Non-string with no line breaks.
+		b.WriteString(v)
+		return
+	}
+
+	// Complex multi-line string, show as-is with indention like this:
+	// I... "hello world" key=<
+	//      line 1
+	//      line 2
+	//  >
+	//
+	// Tabs indent the lines of the value while the end of string delimiter
+	// is indented with a space. That has two purposes:
+	// - visual difference between the two for a human reader because indention
+	//   will be different
+	// - no ambiguity when some value line starts with the end delimiter
+	//
+	// One downside is that the output cannot distinguish between strings that
+	// end with a line break and those that don't because the end delimiter
+	// will always be on the next line.
+	b.WriteString("=<\n")
+	for index != -1 {
+		b.WriteByte('\t')
+		b.Write(data[0 : index+1])
+		data = data[index+1:]
+		index = bytes.IndexByte(data, '\n')
+	}
+	if len(data) == 0 {
+		// String ended with line break, don't add another.
+		b.WriteString(" >")
+	} else {
+		// No line break at end of last line, write rest of string and
+		// add one.
+		b.WriteByte('\t')
+		b.Write(data)
+		b.WriteString("\n >")
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
new file mode 100644
index 000000000..30fa1834f
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go
@@ -0,0 +1,58 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// Package severity provides definitions for klog severity (info, warning, ...)
+package severity
+
+import (
+	"strings"
+)
+
+// severity identifies the sort of log: info, warning etc. The binding to flag.Value
+// is handled in klog.go
+type Severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	InfoLog Severity = iota
+	WarningLog
+	ErrorLog
+	FatalLog
+	NumSeverity = 4
+)
+
+// Char contains one shortcut letter per severity level.
+const Char = "IWEF"
+
+// Name contains one name per severity level.
+var Name = []string{
+	InfoLog:    "INFO",
+	WarningLog: "WARNING",
+	ErrorLog:   "ERROR",
+	FatalLog:   "FATAL",
+}
+
+// ByName looks up a severity level by name.
+func ByName(s string) (Severity, bool) { + s = strings.ToUpper(s) + for i, name := range Name { + if name == s { + return Severity(i), true + } + } + return 0, false +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go new file mode 100644 index 000000000..db58f8baa --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "fmt" + "reflect" + + "github.com/go-logr/logr" +) + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + } + return ref.Name +} + +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } +} + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index dacdf9112..cb04590fe 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,7 +81,6 @@ import ( "math" "os" "path/filepath" - "reflect" "runtime" "strconv" "strings" @@ -90,81 +89,58 @@ import ( "time" "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" ) -// severity identifies the sort of log: info, warning etc. It also implements +// severityValue identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. 
The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", +type severityValue struct { + severity.Severity } // get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) } // set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) } // String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) } // Get is part of the flag.Getter interface. -func (s *severity) Get() interface{} { - return *s +func (s *severityValue) Get() interface{} { + return s.Severity } // Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity +func (s *severityValue) Set(value string) error { + var threshold severity.Severity // Is it a known name? - if v, ok := severityByName(value); ok { + if v, ok := severity.ByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } - threshold = severity(v) + threshold = severity.Severity(v) } logging.stderrThreshold.set(threshold) return nil } -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 @@ -187,10 +163,10 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is @@ -404,9 +380,11 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults and runs flushDaemon. +// init sets up the defaults. func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } logging.setVState(0, nil, false) logging.logDir = "" logging.logFile = "" @@ -417,7 +395,7 @@ func init() { logging.addDirHeader = false logging.skipLogHeaders = false logging.oneOutput = false - go logging.flushDaemon() + logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. 
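With the refactoring above, `severityValue.Set` accepts either a severity name (resolved via `severity.ByName`) or a raw numeric level. A sketch of driving it through the standard flag machinery:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "false") // log to files, mirror by threshold
	// Accepted by severityValue.Set either way: by name or by number ("1").
	_ = flag.Set("stderrthreshold", "WARNING")
	flag.Parse()

	klog.Warning("mirrored to stderr because it meets the threshold")
	klog.Info("only written to the log files")
	klog.Flush()
}
```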
@@ -457,20 +435,23 @@ type loggingT struct { alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex + // bufferCache maintains the free list. It uses its own mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. - freeListMu sync.Mutex + bufferCache buffer.Buffers // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter + file [severity.NumSeverity]flushSyncWriter + // flushD holds a flushDaemon that frequently flushes log file buffers. + flushD *flushDaemon + // flushInterval is the interval for periodic flushing. If zero, + // the global default will be used. + flushInterval time.Duration // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. @@ -508,9 +489,6 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool - // If set, all output will be redirected unconditionally to the provided logr.Logger - logr *logr.Logger - // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -518,13 +496,6 @@ type loggingT struct { filter LogFilter } -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - var logging loggingT // setVState sets a consistent state for V logging. @@ -547,35 +518,6 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool l.verbosity.set(verbosity) } -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - var timeNow = time.Now // Stubbed out for testing. /* @@ -595,7 +537,7 @@ where the fields are defined as follows: line The line number msg The user-supplied message */ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { +func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" @@ -615,133 +557,68 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. 
- } - buf := l.getBuffer() +func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { + buf := l.bufferCache.GetBuffer() if l.skipHeaders { return buf } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) + now := timeNow() + buf.FormatHeader(s, file, line, now) return buf } -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] +func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printlnDepth(s, logger, filter, 1, args...) } -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logger is set, we clear the generated header as we rely on the backing // logger implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) 
} -func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { + l.printfDepth(s, logger, filter, 1, format, args...) +} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -750,19 +627,19 @@ func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, for if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -783,7 +660,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } - l.printS(err, errorLog, depth+1, msg, keysAndValues...) + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. @@ -795,140 +672,25 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } - l.printS(nil, infoLog, depth+1, msg, keysAndValues...) + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. 
// set log severity by s -func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. - b := l.getBuffer() + b := l.bufferCache.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. b.WriteString(strconv.Quote(msg)) if err != nil { - kvListFormat(&b.Buffer, "err", err) + serialize.KVListFormat(&b.Buffer, "err", err) } - kvListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logr, nil, depth+1, &b.Buffer) + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. - l.putBuffer(b) -} - -const missingValue = "(MISSING)" - -func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue - } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if k, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(k) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, stringerToString(v)) - case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, v.Error()) - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - writeStringValue(b, false, fmt.Sprintf("%+v", v)) - } - } -} - -func stringerToString(s fmt.Stringer) (ret string) { - defer func() { - if err := recover(); err != nil { - ret = "nil" - } - }() - ret = s.String() - return -} - -func writeStringValue(b *bytes.Buffer, quote bool, v string) { - data := []byte(v) - index := bytes.IndexByte(data, '\n') - if index == -1 { - b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. 
- b.WriteString(v) - return - } - - // Complex multi-line string, show as-is with indention like this: - // I... "hello world" key=< - // line 1 - // line 2 - // > - // - // Tabs indent the lines of the value while the end of string delimiter - // is indented with a space. That has two purposes: - // - visual difference between the two for a human reader because indention - // will be different - // - no ambiguity when some value line starts with the end delimiter - // - // One downside is that the output cannot distinguish between strings that - // end with a line break and those that don't because the end delimiter - // will always be on the next line. - b.WriteString("=<\n") - for index != -1 { - b.WriteByte('\t') - b.Write(data[0 : index+1]) - data = data[index+1:] - index = bytes.IndexByte(data, '\n') - } - if len(data) == 0 { - // String ended with line break, don't add another. - b.WriteString(" >") - } else { - // No line break at end of last line, write rest of string and - // add one. - b.WriteByte('\t') - b.Write(data) - b.WriteString("\n >") - } + l.bufferCache.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -948,36 +710,11 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } -// SetLogger will set the backing logr implementation for klog. -// If set, all log lines will be suppressed from the regular Output, and -// redirected to the logr implementation. -// Use as: -// ... -// klog.SetLogger(zapr.NewLogger(zapLog)) -// -// To remove a backing logr implemention, use ClearLogger. Setting an -// empty logger with SetLogger(logr.Logger{}) does not work. -func SetLogger(logr logr.Logger) { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = &logr -} - -// ClearLogger removes a backing logr implementation if one was set earlier -// with SetLogger. -func ClearLogger() { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = nil -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { rb := &redirectBuffer{ w: w, } @@ -989,7 +726,7 @@ func SetOutput(w io.Writer) { func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } @@ -1008,7 +745,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. 
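[Editor's note] kvListFormat and writeStringValue move into k8s.io/klog/v2/internal/serialize as KVListFormat, keeping the output contract described in the removed comments. A rough sketch of just the multi-line layout, assuming plain string values only (the real code also special-cases fmt.Stringer, error, and []byte):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
)

// writeValue renders " key=value"; multi-line values use the "=<" block form
// with tab-indented lines and a space-indented " >" terminator, so no value
// line can be mistaken for the end delimiter.
func writeValue(b *bytes.Buffer, key, v string) {
	b.WriteByte(' ')
	b.WriteString(key)
	if !strings.Contains(v, "\n") {
		b.WriteByte('=')
		b.WriteString(strconv.Quote(v))
		return
	}
	b.WriteString("=<\n")
	for _, line := range strings.SplitAfter(v, "\n") {
		if line != "" {
			b.WriteByte('\t')
			b.WriteString(line)
		}
	}
	if !strings.HasSuffix(v, "\n") {
		b.WriteByte('\n') // last line had no break; add one before the delimiter
	}
	b.WriteString(" >")
}

func main() {
	var b bytes.Buffer
	writeValue(&b, "status", "ready")
	writeValue(&b, "output", "line 1\nline 2")
	fmt.Println(b.String())
	// Output:
	//  status="ready" output=<
	//	line 1
	//	line 2
	// >
}
```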
-func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -1027,8 +764,8 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if log != nil { // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == errorLog { - l.logr.WithCallDepth(depth+3).Error(nil, string(data)) + if s == severity.ErrorLog { + globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -1042,13 +779,13 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. l.exit(err) } } - l.file[infoLog].Write(data) + l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -1061,28 +798,28 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, l.file[s].Write(data) } else { switch s { - case fatalLog: - l.file[fatalLog].Write(data) + case severity.FatalLog: + l.file[severity.FatalLog].Write(data) fallthrough - case errorLog: - l.file[errorLog].Write(data) + case severity.ErrorLog: + l.file[severity.ErrorLog].Write(data) fallthrough - case warningLog: - l.file[warningLog].Write(data) + case severity.WarningLog: + l.file[severity.WarningLog].Write(data) fallthrough - case infoLog: - l.file[infoLog].Write(data) + case severity.InfoLog: + l.file[severity.InfoLog].Write(data) } } } } - if s == fatalLog { + if s == severity.FatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() isLocked = false - timeoutFlush(10 * time.Second) - os.Exit(1) + timeoutFlush(ExitFlushTimeout) + OsExit(1) } // Dump all goroutine stacks before exiting. trace := stacks(true) @@ -1092,17 +829,17 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, } // Write the stack trace for all goroutines to the files. logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { + for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() isLocked = false - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded(&) with 255 anyway. + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.putBuffer(buf) + l.bufferCache.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -1110,23 +847,6 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, } } -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. 
This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. @@ -1163,7 +883,7 @@ func (l *loggingT) exit(err error) { return } l.flushAll() - os.Exit(2) + OsExit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the @@ -1174,7 +894,7 @@ type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File - sev severity + sev severity.Severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } @@ -1220,7 +940,7 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severity.Name[sb.sev], now, startup) if err != nil { return err } @@ -1258,11 +978,16 @@ const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. -func (l *loggingT) createFiles(sev severity) error { +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, @@ -1279,10 +1004,91 @@ func (l *loggingT) createFiles(sev severity) error { const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() +type flushDaemon struct { + mu sync.Mutex + clock clock.WithTicker + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} } + return &flushDaemon{ + flush: flush, + clock: tickClock, + } +} + +// run starts a goroutine that periodically calls the daemon's flush function. +// Calling run on an already running daemon will have no effect. +func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return + } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect.
+func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running. +// This prevents klog from leaking goroutines on shutdown. After stopping +// the daemon, you can still manually flush buffers by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) } // lockAndFlushAll is like flushAll but locks l.mu first. @@ -1296,13 +1102,16 @@ func (l *loggingT) lockAndFlushAll() { // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } + if globalLoggerOptions.flush != nil { + globalLoggerOptions.flush() + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1313,7 +1122,7 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } @@ -1325,7 +1134,7 @@ func CopyStandardLogTo(name string) { // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge severity.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -1349,7 +1158,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) return len(b), nil } @@ -1384,22 +1193,21 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr *logr.Logger - filter LogFilter } func newVerbose(level Level, b bool) Verbose { - if logging.logr == nil { - return Verbose{b, nil, logging.filter} + if globalLogger == nil { + return Verbose{b, nil} } - v := logging.logr.V(int(level)) - return Verbose{b, &v, logging.filter} + v := globalLogger.V(int(level)) + return Verbose{b, &v} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. 
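[Editor's note] With the daemon now started lazily from createFiles and exposed through the StartFlushDaemon/StopFlushDaemon helpers above, applications can control background flushing explicitly. A usage sketch (the one-second interval is an arbitrary choice):

```go
package main

import (
	"time"

	"k8s.io/klog/v2"
)

func main() {
	// Flush buffered log lines every second instead of the 5s default.
	klog.StartFlushDaemon(time.Second)
	// Stop the daemon on shutdown so it does not leak a goroutine;
	// klog.Flush() still works for manual flushing afterwards.
	defer klog.StopFlushDaemon()
	defer klog.Flush()

	klog.Info("hello from klog")
}
```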
// Thus, one may write either -// if glog.V(2).Enabled() { klog.Info("log this") } +// if klog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does @@ -1455,7 +1263,15 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, v.filter, args...) + logging.print(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1463,7 +1279,15 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, v.filter, args...) + logging.println(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1471,7 +1295,15 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, v.filter, format, args...) + logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) } } @@ -1479,28 +1311,28 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, depth, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, args...) + logging.errorS(err, v.logr, logging.filter, 0, msg, args...) } } @@ -1508,32 +1340,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. 
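[Editor's note] The corrected comment above shows the two guard styles. The difference matters when log arguments are expensive to build; a sketch, where expensiveDump is a hypothetical stand-in for costly argument construction:

```go
package main

import "k8s.io/klog/v2"

// expensiveDump stands in for any costly argument construction (hypothetical).
func expensiveDump() string { return "...big state dump..." }

func main() {
	klog.InitFlags(nil) // registers -v and friends on the default flag set

	// Cheaper when logging is off: the dump is only built if -v=2 is enabled.
	if klog.V(2).Enabled() {
		klog.InfoS("detailed state", "dump", expensiveDump())
	}

	// Shorter, but expensiveDump() runs even when the line is discarded.
	klog.V(2).Infof("state: %s", expensiveDump())
}
```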
func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, logging.filter, args...) + logging.print(severity.InfoLog, globalLogger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, logging.filter, args...) + logging.println(severity.InfoLog, globalLogger, logging.filter, args...) +} + +// InfolnDepth acts as Infoln but uses depth to determine which call frame to log. +// InfolnDepth(0, "msg") is the same as Infoln("msg"). +func InfolnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) +} + +// InfofDepth acts as Infof but uses depth to determine which call frame to log. +// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). +func InfofDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1545,55 +1389,79 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, logging.filter, args...) + logging.print(severity.WarningLog, globalLogger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, logging.filter, args...) + logging.println(severity.WarningLog, globalLogger, logging.filter, args...) 
+} + +// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. +// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) +} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, logging.filter, args...) + logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, logging.filter, args...) + logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1606,71 +1474,97 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). 
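[Editor's note] The new *Depth variants added in this patch exist so wrapper functions can attribute log lines to their callers rather than to themselves. A usage sketch with a hypothetical project-local helper:

```go
package main

import "k8s.io/klog/v2"

// logf is a hypothetical helper; depth 1 skips logf's own stack frame, so
// the file:line in the log header points at logf's caller.
func logf(format string, args ...interface{}) {
	klog.InfofDepth(1, format, args...)
}

func main() {
	logf("reporting from %s", "main") // header shows this call site, not logf
}
```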
func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). +func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). 
func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) } -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, @@ -1681,79 +1575,10 @@ type LogFilter interface { FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. func SetLogFilter(filter LogFilter) { - logging.mu.Lock() - defer logging.mu.Unlock() - logging.filter = filter } - -// ObjectRef references a kubernetes object -type ObjectRef struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` -} - -func (ref ObjectRef) String() string { - if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) - } - return ref.Name -} - -// MarshalLog ensures that loggers with support for structured output will log -// as a struct by removing the String method via a custom type. 
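[Editor's note] Routing every exit through the exported OsExit variable (together with ExitFlushTimeout) gives tests a seam to observe exit codes instead of dying. A sketch under that assumption; runtime.Goexit stops the goroutine where the stub would otherwise fall through into klog's fatal path:

```go
package main

import (
	"fmt"
	"runtime"

	"k8s.io/klog/v2"
)

func main() {
	realExit := klog.OsExit
	defer func() { klog.OsExit = realExit }()

	exitCode := make(chan int, 1)
	klog.OsExit = func(code int) {
		exitCode <- code
		runtime.Goexit() // end this goroutine instead of the process
	}

	go klog.Exit("simulated fatal condition") // would normally call OsExit(1)
	fmt.Println("intercepted exit code:", <-exitCode)
}
```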
-func (ref ObjectRef) MarshalLog() interface{} { - type or ObjectRef - return or(ref) -} - -var _ logr.Marshaler = ObjectRef{} - -// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -// this interface may expand in the future, but will always be a subset of the -// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -type KMetadata interface { - GetName() string - GetNamespace() string -} - -// KObj returns ObjectRef from ObjectMeta -func KObj(obj KMetadata) ObjectRef { - if obj == nil { - return ObjectRef{} - } - if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { - return ObjectRef{} - } - - return ObjectRef{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } -} - -// KRef returns ObjectRef from name and namespace -func KRef(namespace, name string) ObjectRef { - return ObjectRef{ - Name: name, - Namespace: namespace, - } -} - -// KObjs returns slice of ObjectRef from an slice of ObjectMeta -func KObjs(arg interface{}) []ObjectRef { - s := reflect.ValueOf(arg) - if s.Kind() != reflect.Slice { - return nil - } - objectRefs := make([]ObjectRef, 0, s.Len()) - for i := 0; i < s.Len(); i++ { - if v, ok := s.Index(i).Interface().(KMetadata); ok { - objectRefs = append(objectRefs, KObj(v)) - } else { - return nil - } - } - return objectRefs -} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 000000000..351d7a740 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,87 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). +type klogger struct { + level int + callDepth int + prefix string + values []interface{} +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l klogger) Info(level int, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) +} + +func (l klogger) Enabled(level int) bool { + return V(Level(level)).Enabled() +} + +func (l klogger) Error(err error, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) +} + +// WithName returns a new logr.Logger with the specified name appended. 
klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if len(l.prefix) > 0 { + l.prefix = l.prefix + "/" + } + l.prefix += name + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go index 435081065..4fa230b4e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go @@ -316,7 +316,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) { flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.") flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.") flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.") - flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.") + // Expect the test suite to work with both the new and legacy non-blocking control plane taints by default + flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. 
The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.") flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for runnning tests.") flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.") diff --git a/vendor/modules.txt b/vendor/modules.txt index 09d16acd2..0b5ac8e51 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -8,7 +8,7 @@ github.com/armon/go-metrics # github.com/armon/go-radix v1.0.0 ## explicit github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.43.8 +# github.com/aws/aws-sdk-go v1.43.22 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -53,6 +53,52 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/aws/aws-sdk-go-v2 v1.15.0 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/aws +github.com/aws/aws-sdk-go-v2/aws/defaults +github.com/aws/aws-sdk-go-v2/aws/middleware +github.com/aws/aws-sdk-go-v2/aws/protocol/query +github.com/aws/aws-sdk-go-v2/aws/protocol/xml +github.com/aws/aws-sdk-go-v2/aws/ratelimit +github.com/aws/aws-sdk-go-v2/aws/retry +github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 +github.com/aws/aws-sdk-go-v2/aws/signer/v4 +github.com/aws/aws-sdk-go-v2/aws/transport/http +github.com/aws/aws-sdk-go-v2/internal/rand +github.com/aws/aws-sdk-go-v2/internal/sdk +github.com/aws/aws-sdk-go-v2/internal/strings +github.com/aws/aws-sdk-go-v2/internal/sync/singleflight +github.com/aws/aws-sdk-go-v2/internal/timeconv +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/configsources +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url +# github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/sts +github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/sts/types +# github.com/aws/smithy-go v1.11.1 +## explicit; go 1.15 +github.com/aws/smithy-go +github.com/aws/smithy-go/document +github.com/aws/smithy-go/encoding +github.com/aws/smithy-go/encoding/httpbinding +github.com/aws/smithy-go/encoding/xml +github.com/aws/smithy-go/io +github.com/aws/smithy-go/logging +github.com/aws/smithy-go/middleware +github.com/aws/smithy-go/ptr +github.com/aws/smithy-go/rand +github.com/aws/smithy-go/time +github.com/aws/smithy-go/transport/http +github.com/aws/smithy-go/transport/http/internal/io # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -67,6 +113,7 @@ github.com/blang/semver github.com/cenkalti/backoff/v3 # github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 => ./api ## explicit; go 1.16 +github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd github.com/ceph/ceph-csi/api/deploy/ocp # github.com/ceph/go-ceph v0.14.0 @@ -150,8 +197,8 @@ github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/go-cmp v0.5.5 -## explicit; go 1.8 +# 
github.com/google/go-cmp v0.5.7 +## explicit; go 1.11 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -242,7 +289,7 @@ github.com/hashicorp/hcl/json/token ## explicit; go 1.13 github.com/hashicorp/vault/command/agent/auth github.com/hashicorp/vault/command/agent/auth/kubernetes -# github.com/hashicorp/vault/api v1.4.1 +# github.com/hashicorp/vault/api v1.5.0 ## explicit; go 1.13 github.com/hashicorp/vault/api # github.com/hashicorp/vault/sdk v0.4.1 @@ -280,7 +327,7 @@ github.com/jmespath/go-jmespath # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kubernetes-csi/csi-lib-utils v0.10.0 +# github.com/kubernetes-csi/csi-lib-utils v0.11.0 ## explicit; go 1.16 github.com/kubernetes-csi/csi-lib-utils/connection github.com/kubernetes-csi/csi-lib-utils/metrics @@ -363,8 +410,8 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.18.1 -## explicit; go 1.16 +# github.com/onsi/gomega v1.19.0 +## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -432,7 +479,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.7.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require @@ -521,7 +568,7 @@ golang.org/x/crypto/poly1305 golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd +# golang.org/x/net v0.0.0-20220225172249-27dd8689420f ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -590,7 +637,7 @@ google.golang.org/appengine/urlfetch google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.44.0 +# google.golang.org/grpc v1.45.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes @@ -642,8 +689,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 +# google.golang.org/protobuf v1.28.0 +## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -697,7 +744,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.23.4 => k8s.io/api v0.23.4 +# k8s.io/api v0.23.5 => k8s.io/api v0.23.5 ## explicit; go 1.16 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -746,7 +793,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.23.4 => k8s.io/apimachinery v0.23.4 +# k8s.io/apimachinery v0.23.5 => k8s.io/apimachinery v0.23.5 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -802,7 +849,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.23.4 => 
k8s.io/apiserver v0.23.4 +# k8s.io/apiserver v0.23.5 => k8s.io/apiserver v0.23.5 ## explicit; go 1.16 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -843,7 +890,7 @@ k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.23.4 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.23.5 ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1086,12 +1133,12 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.23.4 => k8s.io/cloud-provider v0.23.4 +# k8s.io/cloud-provider v0.23.5 => k8s.io/cloud-provider v0.23.5 ## explicit; go 1.16 k8s.io/cloud-provider k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.23.4 => k8s.io/component-base v0.23.4 +# k8s.io/component-base v0.23.5 => k8s.io/component-base v0.23.5 ## explicit; go 1.16 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1102,26 +1149,30 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/testutil k8s.io/component-base/traces k8s.io/component-base/version -# k8s.io/component-helpers v0.23.4 => k8s.io/component-helpers v0.23.4 +# k8s.io/component-helpers v0.23.5 => k8s.io/component-helpers v0.23.5 ## explicit; go 1.16 k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/klog/v2 v2.40.1 +# k8s.io/klog/v2 v2.60.1 ## explicit; go 1.13 k8s.io/klog/v2 +k8s.io/klog/v2/internal/buffer +k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/serialize +k8s.io/klog/v2/internal/severity # k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 ## explicit; go 1.16 k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.4 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.5 ## explicit; go 1.16 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.4 +# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.5 ## explicit; go 1.16 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.23.4 +# k8s.io/kubernetes v1.23.5 ## explicit; go 1.16 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1184,7 +1235,7 @@ k8s.io/kubernetes/test/e2e/storage/podlogs k8s.io/kubernetes/test/e2e/storage/utils k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image -# k8s.io/mount-utils v0.23.4 => k8s.io/mount-utils v0.23.4 +# k8s.io/mount-utils v0.23.5 => k8s.io/mount-utils v0.23.5 ## explicit; go 1.16 k8s.io/mount-utils # k8s.io/utils v0.0.0-20211116205334-6203023598ed @@ -1204,7 +1255,7 @@ k8s.io/utils/nsenter k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client @@ -1263,29 +1314,29 @@ sigs.k8s.io/yaml # github.com/golang/protobuf => github.com/golang/protobuf v1.4.3 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 # gomodules.xyz/jsonpatch/v2 => 
github.com/gomodules/jsonpatch/v2 v2.2.0 -# k8s.io/api => k8s.io/api v0.23.4 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.23.4 -# k8s.io/apiserver => k8s.io/apiserver v0.23.4 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.4 -# k8s.io/client-go => k8s.io/client-go v0.23.4 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.4 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.4 -# k8s.io/code-generator => k8s.io/code-generator v0.23.4 -# k8s.io/component-base => k8s.io/component-base v0.23.4 -# k8s.io/component-helpers => k8s.io/component-helpers v0.23.4 -# k8s.io/controller-manager => k8s.io/controller-manager v0.23.4 -# k8s.io/cri-api => k8s.io/cri-api v0.23.4 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.4 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.4 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.4 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.4 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.4 -# k8s.io/kubectl => k8s.io/kubectl v0.23.4 -# k8s.io/kubelet => k8s.io/kubelet v0.23.4 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.4 -# k8s.io/metrics => k8s.io/metrics v0.23.4 -# k8s.io/mount-utils => k8s.io/mount-utils v0.23.4 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.4 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.4 +# k8s.io/api => k8s.io/api v0.23.5 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.5 +# k8s.io/apimachinery => k8s.io/apimachinery v0.23.5 +# k8s.io/apiserver => k8s.io/apiserver v0.23.5 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.5 +# k8s.io/client-go => k8s.io/client-go v0.23.5 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.5 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.5 +# k8s.io/code-generator => k8s.io/code-generator v0.23.5 +# k8s.io/component-base => k8s.io/component-base v0.23.5 +# k8s.io/component-helpers => k8s.io/component-helpers v0.23.5 +# k8s.io/controller-manager => k8s.io/controller-manager v0.23.5 +# k8s.io/cri-api => k8s.io/cri-api v0.23.5 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.5 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.5 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.5 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.5 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.5 +# k8s.io/kubectl => k8s.io/kubectl v0.23.5 +# k8s.io/kubelet => k8s.io/kubelet v0.23.5 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.5 +# k8s.io/metrics => k8s.io/metrics v0.23.5 +# k8s.io/mount-utils => k8s.io/mount-utils v0.23.5 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.5 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.5 # layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index 6f247128d..a3e17cb10 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -43,10 +43,17 @@ type dialResult struct { connid int64 } +type pendingDial struct { + // resultCh is the channel to send the dial result to + resultCh chan<- dialResult 
+ // cancelCh is the channel closed when resultCh no longer has a receiver + cancelCh <-chan struct{} +} + // grpcTunnel implements Tunnel type grpcTunnel struct { stream client.ProxyService_ProxyClient - pendingDial map[int64]chan<- dialResult + pendingDial map[int64]pendingDial conns map[int64]*conn pendingDialLock sync.RWMutex connsLock sync.RWMutex @@ -76,12 +83,13 @@ func CreateSingleUseGrpcTunnel(ctx context.Context, address string, opts ...grpc stream, err := grpcClient.Proxy(ctx) if err != nil { + c.Close() return nil, err } tunnel := &grpcTunnel{ stream: stream, - pendingDial: make(map[int64]chan<- dialResult), + pendingDial: make(map[int64]pendingDial), conns: make(map[int64]*conn), readTimeoutSeconds: 10, } @@ -110,7 +118,7 @@ func (t *grpcTunnel) serve(c clientConn) { case client.PacketType_DIAL_RSP: resp := pkt.GetDialResponse() t.pendingDialLock.RLock() - ch, ok := t.pendingDial[resp.Random] + pendingDial, ok := t.pendingDial[resp.Random] t.pendingDialLock.RUnlock() if !ok { @@ -122,10 +130,16 @@ func (t *grpcTunnel) serve(c clientConn) { connid: resp.ConnectID, } select { - case ch <- result: - default: - klog.ErrorS(fmt.Errorf("blocked pending channel"), "Received second dial response for connection request", "connectionID", resp.ConnectID, "dialID", resp.Random) - // On multiple dial responses, avoid leaking serve goroutine. + // try to send to the result channel + case pendingDial.resultCh <- result: + // unblock if the cancel channel is closed + case <-pendingDial.cancelCh: + // If there are no readers of the pending dial channel above, it means one of two things: + // 1. There was a second DIAL_RSP for the connection request (this is very unlikely but possible) + // 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context + // + // In either scenario, we should return here as this tunnel is no longer needed. + klog.V(1).InfoS("Pending dial has been cancelled; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random) return } } @@ -182,9 +196,16 @@ func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string) } random := rand.Int63() /* #nosec G404 */ - resCh := make(chan dialResult, 1) + + // This channel is closed once we're returning and no longer waiting on resultCh + cancelCh := make(chan struct{}) + defer close(cancelCh) + + // This channel MUST NOT be buffered. The sender needs to know when we are not receiving things, so they can abort. + resCh := make(chan dialResult) + t.pendingDialLock.Lock() - t.pendingDial[random] = resCh + t.pendingDial[random] = pendingDial{resultCh: resCh, cancelCh: cancelCh} t.pendingDialLock.Unlock() defer func() { t.pendingDialLock.Lock() @@ -225,8 +246,10 @@ func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string) t.conns[res.connid] = c t.connsLock.Unlock() case <-time.After(30 * time.Second): + klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random) return nil, errors.New("dial timeout, backstop") case <-ctx.Done(): + klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", ctx.Err(), "dialID", random) return nil, errors.New("dial timeout, context") }
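[Editor's note] The combination above — an unbuffered result channel plus a cancel channel closed by the receiver — is what lets the serve goroutine drop late or duplicate DIAL_RSP packets without blocking or leaking. A distilled sketch of the pattern outside gRPC (names are illustrative, not the konnectivity-client API):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type dialResult struct{ connID int64 }

type pendingDial struct {
	resultCh chan<- dialResult // unbuffered: a successful send proves a receiver is present
	cancelCh <-chan struct{}   // closed once the dialer stops receiving
}

// deliver plays the role of the serve goroutine handling a DIAL_RSP.
func deliver(p pendingDial, r dialResult) {
	select {
	case p.resultCh <- r:
		// the dialer received the result
	case <-p.cancelCh:
		// the dialer timed out or was canceled; drop the response and return
		fmt.Println("pending dial has been cancelled; dropped", r.connID)
	}
}

func dial(ctx context.Context) (int64, error) {
	resCh := make(chan dialResult) // deliberately unbuffered
	cancelCh := make(chan struct{})
	defer close(cancelCh) // signal "no receiver anymore" as soon as we return

	go deliver(pendingDial{resultCh: resCh, cancelCh: cancelCh}, dialResult{connID: 42})

	select {
	case r := <-resCh:
		return r.connID, nil
	case <-time.After(30 * time.Second):
		return 0, errors.New("dial timeout, backstop")
	case <-ctx.Done():
		return 0, errors.New("dial timeout, context")
	}
}

func main() {
	id, err := dial(context.Background())
	fmt.Println(id, err)
}
```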