From 2bb5ca9dbc2bd2041d924f0d9e9a55952b02f1e4 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Wed, 6 Apr 2022 13:45:10 +0530 Subject: [PATCH 01/20] doc: move release 3.4.0 to unsupported version As we have 3.6.0 release available now, moving 3.4.0 to unsupported version. Signed-off-by: Humble Chirammal --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 389d35e15..66f23ee0d 100644 --- a/README.md +++ b/README.md @@ -128,10 +128,10 @@ in the Kubernetes documentation. | v3.6.0 (Release) | quay.io/cephcsi/cephcsi | v3.6.0 | | v3.5.1 (Release) | quay.io/cephcsi/cephcsi | v3.5.1 | | v3.5.0 (Release) | quay.io/cephcsi/cephcsi | v3.5.0 | -| v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 | | Deprecated Ceph CSI Release/Branch | Container image name | Image Tag | | ----------------------- | --------------------------------| --------- | +| v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 | | v3.3.1 (Release) | quay.io/cephcsi/cephcsi | v3.3.1 | | v3.3.0 (Release) | quay.io/cephcsi/cephcsi | v3.3.0 | | v3.2.2 (Release) | quay.io/cephcsi/cephcsi | v3.2.2 | From 1b52256a22d964d35b5d0fe26a80d1b2e7508006 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 6 Apr 2022 20:14:25 +0530 Subject: [PATCH 02/20] ci: add mergify rule for 3.6 backport added mergify rules to create the backport PR for release-3.6 branch. 
Signed-off-by: Madhu Rajanna --- .mergify.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 26576ceae..8381415a2 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -256,6 +256,14 @@ pull_request_rules: queue: name: default delete_head_branch: {} + - name: backport patches to release-v3.6 branch + conditions: + - base=devel + - label=backport-to-release-v3.6 + actions: + backport: + branches: + - release-v3.6 - name: remove outdated approvals on ci/centos conditions: - base=ci/centos From 519202629ee4de05c7d0dd341a1b3271248ced17 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 6 Apr 2022 20:35:41 +0530 Subject: [PATCH 03/20] ci: remove release-v3.4 mergify rules as we have deprecated the 3.4 release removing the mergify rules for the same. Signed-off-by: Madhu Rajanna --- .mergify.yml | 55 ---------------------------------------------------- 1 file changed, 55 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 8381415a2..16cc2270f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -37,23 +37,6 @@ queue_rules: - "status-success=ci/centos/mini-e2e/k8s-1.23" - "status-success=ci/centos/upgrade-tests-cephfs" - "status-success=ci/centos/upgrade-tests-rbd" - - and: - - base=release-v3.4 - - "status-success=codespell" - - "status-success=multi-arch-build" - - "status-success=go-test" - - "status-success=commitlint" - - "status-success=golangci-lint" - - "status-success=mod-check" - - "status-success=lint-extras" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.21" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - - "status-success=ci/centos/mini-e2e/k8s-1.21" - - "status-success=ci/centos/mini-e2e/k8s-1.22" - - "status-success=ci/centos/mini-e2e/k8s-1.23" - - "status-success=ci/centos/upgrade-tests-cephfs" - - "status-success=ci/centos/upgrade-tests-rbd" - and: - base=ci/centos - "status-success=ci/centos/job-validation" @@ -218,44 +201,6 @@ 
pull_request_rules: actions: queue: name: default - - name: backport patches to release-v3.4 branch - conditions: - - base=devel - - label=backport-to-release-v3.4 - actions: - backport: - branches: - - release-v3.4 - # automerge backports if CI successfully ran - - name: automerge backport release-v3.4 - conditions: - - author=mergify[bot] - - base=release-v3.4 - - label!=DNM - - "#approved-reviews-by>=2" - - "approved-reviews-by=@ceph/ceph-csi-contributors" - - "approved-reviews-by=@ceph/ceph-csi-maintainers" - - "status-success=codespell" - - "status-success=multi-arch-build" - - "status-success=go-test" - - "status-success=commitlint" - - "status-success=golangci-lint" - - "status-success=mod-check" - - "status-success=lint-extras" - - "#changes-requested-reviews-by=0" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.21" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - - "status-success=ci/centos/mini-e2e/k8s-1.21" - - "status-success=ci/centos/mini-e2e/k8s-1.22" - - "status-success=ci/centos/mini-e2e/k8s-1.23" - - "status-success=ci/centos/upgrade-tests-cephfs" - - "status-success=ci/centos/upgrade-tests-rbd" - - "status-success=DCO" - actions: - queue: - name: default - delete_head_branch: {} - name: backport patches to release-v3.6 branch conditions: - base=devel From 6333c4b1e6ca1ef7d52c7d4b9fb15d086cbb0341 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 6 Apr 2022 20:37:09 +0530 Subject: [PATCH 04/20] ci: remove duplicate release-3.5 merge rules we already have generic rules to merge the PR's in devel and release branches with `automatic merge` rules. Removing the duplicate release-3.5 rule. 
Signed-off-by: Madhu Rajanna --- .mergify.yml | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 16cc2270f..75c928ddc 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -169,38 +169,6 @@ pull_request_rules: backport: branches: - release-v3.5 - # automerge backports if CI successfully ran - - name: automerge backport release-v3.5 - conditions: - - author=mergify[bot] - - base=release-v3.5 - - label!=DNM - - "#approved-reviews-by>=2" - - "approved-reviews-by=@ceph/ceph-csi-contributors" - - "approved-reviews-by=@ceph/ceph-csi-maintainers" - - "#changes-requested-reviews-by=0" - - "status-success=codespell" - - "status-success=multi-arch-build" - - "status-success=go-test" - - "status-success=commitlint" - - "status-success=golangci-lint" - - "status-success=mod-check" - - "status-success=lint-extras" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.21" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.22" - - "status-success=ci/centos/mini-e2e-helm/k8s-1.23" - - "status-success=ci/centos/mini-e2e/k8s-1.21" - - "status-success=ci/centos/mini-e2e/k8s-1.22" - - "status-success=ci/centos/mini-e2e/k8s-1.23" - - "status-success=ci/centos/k8s-e2e-external-storage/1.21" - - "status-success=ci/centos/k8s-e2e-external-storage/1.22" - - "status-success=ci/centos/k8s-e2e-external-storage/1.23" - - "status-success=ci/centos/upgrade-tests-cephfs" - - "status-success=ci/centos/upgrade-tests-rbd" - - "status-success=DCO" - actions: - queue: - name: default - name: backport patches to release-v3.6 branch conditions: - base=devel From 7b2aef0d8120033f232e81204616f1d09df708cd Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 31 Mar 2022 18:59:33 +0530 Subject: [PATCH 05/20] util: add support for the nsenter add support to run rbd map and mount -t commands with the nsenter. 
complete design of pod/multus network is added here https://github.com/rook/rook/ blob/master/design/ceph/multus-network.md#csi-pods Signed-off-by: Madhu Rajanna --- .../templates/nodeplugin-daemonset.yaml | 1 + charts/ceph-csi-cephfs/values.yaml | 1 + charts/ceph-csi-rbd/values.yaml | 1 + .../cephfs/kubernetes/csi-cephfsplugin.yaml | 1 + examples/README.md | 36 ++++++++++ examples/csi-config-map-sample.yaml | 7 ++ internal/cephfs/mounter/fuse.go | 12 +++- internal/cephfs/mounter/kernel.go | 11 ++- internal/cephfs/nodeserver.go | 5 ++ internal/cephfs/store/volumeoptions.go | 2 + internal/rbd/nodeserver.go | 4 ++ internal/rbd/rbd_attach.go | 13 +++- internal/rbd/rbd_util.go | 2 + internal/util/cephcmds.go | 42 ++++++++++++ internal/util/csiconfig.go | 11 +++ internal/util/csiconfig_test.go | 67 +++++++++++++++++++ 16 files changed, 211 insertions(+), 5 deletions(-) diff --git a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml index b2244d195..58e61e93d 100644 --- a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml +++ b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml @@ -31,6 +31,7 @@ spec: priorityClassName: {{ .Values.nodeplugin.priorityClassName }} {{- end }} hostNetwork: true + hostPID: true # to use e.g. Rook orchestrated cluster, and mons' FQDN is # resolved through k8s service, set dns policy to cluster first dnsPolicy: ClusterFirstWithHostNet diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml index b8e902fac..73f396f30 100644 --- a/charts/ceph-csi-cephfs/values.yaml +++ b/charts/ceph-csi-cephfs/values.yaml @@ -27,6 +27,7 @@ serviceAccounts: # - "" # cephFS: # subvolumeGroup: "csi" +# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" csiConfig: [] # Set logging level for csi containers. 
diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml index a7bd91765..b6e6e55e4 100644 --- a/charts/ceph-csi-rbd/values.yaml +++ b/charts/ceph-csi-rbd/values.yaml @@ -25,6 +25,7 @@ serviceAccounts: # monitors: # - "" # - "" +# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" csiConfig: [] # Configuration details of clusterID,PoolID and FscID mapping diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index 5e1d0f9d6..e8e7f1cf2 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -15,6 +15,7 @@ spec: serviceAccountName: cephfs-csi-nodeplugin priorityClassName: system-node-critical hostNetwork: true + hostPID: true # to use e.g. Rook orchestrated cluster, and mons' FQDN is # resolved through k8s service, set dns policy to cluster first dnsPolicy: ClusterFirstWithHostNet diff --git a/examples/README.md b/examples/README.md index 674e26c57..fe14b3435 100644 --- a/examples/README.md +++ b/examples/README.md @@ -50,6 +50,42 @@ kubectl replace -f ./csi-config-map-sample.yaml Storage class and snapshot class, using `` as the value for the option `clusterID`, can now be created on the cluster. +## Running CephCSI with pod networking + +The current problem with Pod Networking, is when a CephFS/RBD volume is mounted +in a pod using Ceph CSI and then the CSI CephFS/RBD plugin is restarted or +terminated (e.g. by restarting or deleting its DaemonSet), all operations on +the volume become blocked, even after restarting the CSI pods. + +The only workaround is to restart the node where the Ceph CSI plugin pod was +restarted. This can be mitigated by running the `rbd map`/`mount -t` commands +in a different network namespace which does not get deleted when the CSI +CephFS/RBD plugin is restarted or terminated. 
+ +If someone wants to run the CephCSI with the pod networking they can still do +by setting the `netNamespaceFilePath`. If this path is set CephCSI will execute +the `rbd map`/`mount -t` commands after entering the [network +namespace](https://man7.org/linux/man-pages/man7/network_namespaces.7.html) +specified by `netNamespaceFilePath` with the +[nsenter](https://man7.org/linux/man-pages/man1/nsenter.1.html) command. + +`netNamespaceFilePath` should point to the network namespace of some +long-running process, typically it would be a symlink to +`/proc//ns/net`. + +The long-running process can also be another pod which is a Daemonset which +never restarts. This Pod should only be stopped and restarted when a node is +stopped so that volume operations do not become blocked. The new DaemonSet pod +can contain a single container, responsible for holding its pod network alive. +It is used as a passthrough by the CephCSI plugin pod which when mounting or +mapping will use the network namespace of this pod. + +Once the pod is created get its PID and create a symlink to +`/proc//ns/net` in the hostPath volume shared with the csi-plugin pod and +specify the path in the `netNamespaceFilePath` option. + +*Note* This Pod should have `hostPID: true` in the Pod Spec. + ## Deploying the storage class Once the plugin is successfully deployed, you'll need to customize diff --git a/examples/csi-config-map-sample.yaml b/examples/csi-config-map-sample.yaml index 581fbd108..eddab8f9a 100644 --- a/examples/csi-config-map-sample.yaml +++ b/examples/csi-config-map-sample.yaml @@ -20,6 +20,12 @@ kind: ConfigMap # NOTE: Make sure you don't add radosNamespace option to a currently in use # configuration as it will cause issues. # The field "cephFS.subvolumeGroup" is optional and defaults to "csi". 
+# The fields are the various network namespace +# path for the Ceph cluster identified by the , This will be used +# by the CSI plugin to execute the rbd map/unmap and mount -t commands in the +# network namespace specified by the . +# If a CSI plugin is using more than one Ceph cluster, repeat the section for +# each such cluster in use. # NOTE: Changes to the configmap is automatically updated in the running pods, # thus restarting existing pods using the configmap is NOT required on edits # to the configmap. @@ -37,6 +43,7 @@ data: { "clusterID": "", "radosNamespace": "", + "netNamespaceFilePath": "/plugins/rbd.csi.ceph.com/net", "monitors": [ "", "", diff --git a/internal/cephfs/mounter/fuse.go b/internal/cephfs/mounter/fuse.go index 91077398b..2711db5ee 100644 --- a/internal/cephfs/mounter/fuse.go +++ b/internal/cephfs/mounter/fuse.go @@ -65,12 +65,20 @@ func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, vol if volOptions.FsName != "" { args = append(args, "--client_mds_namespace="+volOptions.FsName) } + var ( + stderr string + err error + ) + + if volOptions.NetNamespaceFilePath != "" { + _, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOptions.NetNamespaceFilePath, "ceph-fuse", args[:]...) + } else { + _, stderr, err = util.ExecCommand(ctx, "ceph-fuse", args[:]...) + } - _, stderr, err := util.ExecCommand(ctx, "ceph-fuse", args[:]...) if err != nil { return fmt.Errorf("%w stderr: %s", err, stderr) } - // Parse the output: // We need "starting fuse" meaning the mount is ok // and PID of the ceph-fuse daemon for unmount diff --git a/internal/cephfs/mounter/kernel.go b/internal/cephfs/mounter/kernel.go index ff515be28..b7ae67ff8 100644 --- a/internal/cephfs/mounter/kernel.go +++ b/internal/cephfs/mounter/kernel.go @@ -51,7 +51,16 @@ func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, v args = append(args, "-o", optionsStr) - _, stderr, err := util.ExecCommand(ctx, "mount", args[:]...) 
+ var ( + stderr string + err error + ) + + if volOptions.NetNamespaceFilePath != "" { + _, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOptions.NetNamespaceFilePath, "mount", args[:]...) + } else { + _, stderr, err = util.ExecCommand(ctx, "mount", args[:]...) + } if err != nil { return fmt.Errorf("%w stderr: %s", err, stderr) } diff --git a/internal/cephfs/nodeserver.go b/internal/cephfs/nodeserver.go index 21457ff38..b7c10db55 100644 --- a/internal/cephfs/nodeserver.go +++ b/internal/cephfs/nodeserver.go @@ -126,6 +126,11 @@ func (ns *NodeServer) NodeStageVolume( } defer volOptions.Destroy() + volOptions.NetNamespaceFilePath, err = util.GetNetNamespaceFilePath(util.CsiConfigFile, volOptions.ClusterID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + mnt, err := mounter.New(volOptions) if err != nil { log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err) diff --git a/internal/cephfs/store/volumeoptions.go b/internal/cephfs/store/volumeoptions.go index 44706f61f..0f1a35e3b 100644 --- a/internal/cephfs/store/volumeoptions.go +++ b/internal/cephfs/store/volumeoptions.go @@ -50,6 +50,8 @@ type VolumeOptions struct { ProvisionVolume bool `json:"provisionVolume"` KernelMountOptions string `json:"kernelMountOptions"` FuseMountOptions string `json:"fuseMountOptions"` + // Network namespace file path to execute nsenter command + NetNamespaceFilePath string // conn is a connection to the Ceph cluster obtained from a ConnPool conn *util.ClusterConnection diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 143a31ef7..579f2c211 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -332,6 +332,10 @@ func (ns *NodeServer) NodeStageVolume( } defer rv.Destroy() + rv.NetNamespaceFilePath, err = util.GetNetNamespaceFilePath(util.CsiConfigFile, rv.ClusterID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } if isHealer { err = 
healerStageTransaction(ctx, cr, rv, stagingParentPath) if err != nil { diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index 1c872ba44..e2cddef50 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -457,8 +457,17 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util. mapArgs = append(mapArgs, "--read-only") } - // Execute map - stdout, stderr, err := util.ExecCommand(ctx, cli, mapArgs...) + var ( + stdout string + stderr string + err error + ) + + if volOpt.NetNamespaceFilePath != "" { + stdout, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOpt.NetNamespaceFilePath, cli, mapArgs...) + } else { + stdout, stderr, err = util.ExecCommand(ctx, cli, mapArgs...) + } if err != nil { log.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr) // unmap rbd image if connection timeout diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 3afe68962..4d4986ae2 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -156,6 +156,8 @@ type rbdVolume struct { LogStrategy string VolName string MonValueFromSecret string + // Network namespace file path to execute nsenter command + NetNamespaceFilePath string // RequestedVolSize has the size of the volume requested by the user and // this value will not be updated when doing getImageInfo() on rbdVolume. RequestedVolSize int64 diff --git a/internal/util/cephcmds.go b/internal/util/cephcmds.go index 66c32b196..71999e98d 100644 --- a/internal/util/cephcmds.go +++ b/internal/util/cephcmds.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "os" "os/exec" "time" @@ -32,6 +33,47 @@ import ( // InvalidPoolID used to denote an invalid pool. const InvalidPoolID int64 = -1 +// ExecuteCommandWithNSEnter executes passed in program with args with nsenter +// and returns separate stdout and stderr streams. In case ctx is not set to +// context.TODO(), the command will be logged after it was executed. 
+func ExecuteCommandWithNSEnter(ctx context.Context, netPath, program string, args ...string) (string, string, error) { + var ( + sanitizedArgs = StripSecretInArgs(args) + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + // check netPath exists + if _, err := os.Stat(netPath); err != nil { + return "", "", fmt.Errorf("failed to get stat for %s %w", netPath, err) + } + // nsenter --net=%s -- + args = append([]string{fmt.Sprintf("--net=%s", netPath), "--", program}, args...) + cmd := exec.Command("nsenter", args...) // #nosec:G204, commands executing not vulnerable. + + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + + err := cmd.Run() + stdout := stdoutBuf.String() + stderr := stderrBuf.String() + + if err != nil { + err = fmt.Errorf("an error (%w) occurred while running %s args: %v", err, program, sanitizedArgs) + if ctx != context.TODO() { + log.UsefulLog(ctx, "%s", err) + } + + return stdout, stderr, err + } + + if ctx != context.TODO() { + log.UsefulLog(ctx, "command succeeded: %s %v", program, sanitizedArgs) + } + + return stdout, stderr, nil +} + // ExecCommand executes passed in program with args and returns separate stdout // and stderr streams. In case ctx is not set to context.TODO(), the command // will be logged after it was executed. diff --git a/internal/util/csiconfig.go b/internal/util/csiconfig.go index 97076cde6..a6e3dd534 100644 --- a/internal/util/csiconfig.go +++ b/internal/util/csiconfig.go @@ -49,6 +49,8 @@ type ClusterInfo struct { // SubvolumeGroup contains the name of the SubvolumeGroup for CSI volumes SubvolumeGroup string `json:"subvolumeGroup"` } `json:"cephFS"` + // symlink filepath for the network namespace where we need to execute commands. 
+ NetNamespaceFilePath string `json:"netNamespaceFilePath"` } // Expected JSON structure in the passed in config file is, @@ -161,3 +163,12 @@ func GetClusterID(options map[string]string) (string, error) { return clusterID, nil } + +func GetNetNamespaceFilePath(pathToConfig, clusterID string) (string, error) { + cluster, err := readClusterInfo(pathToConfig, clusterID) + if err != nil { + return "", err + } + + return cluster.NetNamespaceFilePath, nil +} diff --git a/internal/util/csiconfig_test.go b/internal/util/csiconfig_test.go index fdcd24038..22ceadef2 100644 --- a/internal/util/csiconfig_test.go +++ b/internal/util/csiconfig_test.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "encoding/json" "os" "testing" ) @@ -138,3 +139,69 @@ func TestCSIConfig(t *testing.T) { t.Errorf("Test setup error %s", err) } } + +func TestGetNetNamespaceFilePath(t *testing.T) { + t.Parallel() + tests := []struct { + name string + clusterID string + want string + }{ + { + name: "get NetNamespaceFilePath for cluster-1", + clusterID: "cluster-1", + want: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster1-net", + }, + { + name: "get NetNamespaceFilePath for cluster-2", + clusterID: "cluster-2", + want: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster2-net", + }, + { + name: "when NetNamespaceFilePath is empty", + clusterID: "cluster-3", + want: "", + }, + } + + csiConfig := []ClusterInfo{ + { + ClusterID: "cluster-1", + Monitors: []string{"ip-1", "ip-2"}, + NetNamespaceFilePath: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster1-net", + }, + { + ClusterID: "cluster-2", + Monitors: []string{"ip-3", "ip-4"}, + NetNamespaceFilePath: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster2-net", + }, + { + ClusterID: "cluster-3", + Monitors: []string{"ip-5", "ip-6"}, + }, + } + csiConfigFileContent, err := json.Marshal(csiConfig) + if err != nil { + t.Errorf("failed to marshal csi config info %v", err) + } + tmpConfPath := t.TempDir() + "/ceph-csi.json" + err = 
os.WriteFile(tmpConfPath, csiConfigFileContent, 0o600) + if err != nil { + t.Errorf("failed to write %s file content: %v", CsiConfigFile, err) + } + for _, tt := range tests { + ts := tt + t.Run(ts.name, func(t *testing.T) { + t.Parallel() + got, err := GetNetNamespaceFilePath(tmpConfPath, ts.clusterID) + if err != nil { + t.Errorf("GetNetNamespaceFilePath() error = %v", err) + + return + } + if got != ts.want { + t.Errorf("GetNetNamespaceFilePath() = %v, want %v", got, ts.want) + } + }) + } +} From 4d750ed0e550f36b016b17a965742f638d838cad Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Tue, 22 Feb 2022 17:53:45 +0530 Subject: [PATCH 06/20] rbd: add set/Get VolumeMetadata() utility function Define and use PV and PVC metadata keys used by external provisioner. The CSI external-provisioner (v1.6.0+) introduces the --extra-create-metadata flag, which automatically sets map parameters in the CSI CreateVolumeRequest. Add utility functions to set/Get PV/PVC/PVCNamespace metadata on image Signed-off-by: Prasanna Kumar Kalever --- internal/rbd/rbd_util.go | 13 +++++++++++++ internal/util/k8s/parameters.go | 22 +++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 4d4986ae2..c0cdfe796 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -28,6 +28,7 @@ import ( "time" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/k8s" "github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/go-ceph/rados" @@ -1920,3 +1921,15 @@ func genVolFromVolIDWithMigration( return rv, err } + +// setVolumeMetadata set PV/PVC/PVCNamespace metadata on RBD image. 
+func (rv *rbdVolume) setVolumeMetadata(parameters map[string]string) error { + for k, v := range k8s.GetVolumeMetadata(parameters) { + err := rv.SetMetadata(k, v) + if err != nil { + return fmt.Errorf("failed to set metadata key %q, value %q on image: %w", k, v, err) + } + } + + return nil +} diff --git a/internal/util/k8s/parameters.go b/internal/util/k8s/parameters.go index 5d85182b5..e5f36cb6b 100644 --- a/internal/util/k8s/parameters.go +++ b/internal/util/k8s/parameters.go @@ -23,7 +23,12 @@ import ( // to the driver on CreateVolumeRequest/CreateSnapshotRequest calls. const ( csiParameterPrefix = "csi.storage.k8s.io/" - pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" + + // PV and PVC metadata keys used by external provisioner as part of + // create requests as parameters, when `extra-create-metadata` is true. + pvcNameKey = "csi.storage.k8s.io/pvc/name" + pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" + pvNameKey = "csi.storage.k8s.io/pv/name" ) // RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix. @@ -43,3 +48,18 @@ func RemoveCSIPrefixedParameters(param map[string]string) map[string]string { func GetOwner(param map[string]string) string { return param[pvcNamespaceKey] } + +// GetVolumeMetadata filter parameters, only return PV/PVC/PVCNamespace metadata. +func GetVolumeMetadata(parameters map[string]string) map[string]string { + keys := []string{pvcNameKey, pvcNamespaceKey, pvNameKey} + newParam := map[string]string{} + for k, v := range parameters { + for _, key := range keys { + if strings.Contains(k, key) { + newParam[k] = v + } + } + } + + return newParam +} From 0119d69ab2f67f8209ec64947d5af63c22d801f1 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Mon, 21 Feb 2022 15:18:18 +0530 Subject: [PATCH 07/20] rbd: set PV/PVC details on the image as metadata on create This helps Monitoring solutions without access to Kubernetes clusters to display the details of the PV/PVC/NameSpace in their dashboard. 
Signed-off-by: Prasanna Kumar Kalever --- internal/rbd/controllerserver.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 1dc179e28..c39e943cc 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -329,6 +329,12 @@ func (cs *ControllerServer) CreateVolume( return nil, err } + // Set Metadata on PV Create + err = rbdVol.setVolumeMetadata(req.GetParameters()) + if err != nil { + return nil, err + } + return buildCreateVolumeResponse(req, rbdVol), nil } From ae5925f04cdce9b83425a7f8547be74779de8f63 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Mon, 21 Feb 2022 16:54:24 +0530 Subject: [PATCH 08/20] rbd: update PV/PVC metadata on a reattach of PV Example if a PVC was delete by setting `persistentVolumeReclaimPolicy` as `Retain` on PV, and PV is reattached to a new PVC, we make sure to update PV/PVC image metadata on a PV reattach. Signed-off-by: Prasanna Kumar Kalever --- .../persistentvolume/persistentvolume.go | 8 +++++++- internal/rbd/rbd_journal.go | 15 ++++++++++++++- internal/util/k8s/parameters.go | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/internal/controller/persistentvolume/persistentvolume.go b/internal/controller/persistentvolume/persistentvolume.go index e6957d88c..4bf46a7dd 100644 --- a/internal/controller/persistentvolume/persistentvolume.go +++ b/internal/controller/persistentvolume/persistentvolume.go @@ -177,7 +177,13 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime. 
} defer cr.DeleteCredentials() - rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, pvcNamespace, cr) + rbdVolID, err := rbd.RegenerateJournal( + pv.Spec.CSI.VolumeAttributes, + pv.Spec.ClaimRef.Name, + volumeHandler, + requestName, + pvcNamespace, + cr) if err != nil { log.ErrorLogMsg("failed to regenerate journal %s", err) diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index 4e2d03c6f..a8907ac7e 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -23,6 +23,7 @@ import ( "github.com/ceph/ceph-csi/internal/journal" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/k8s" "github.com/ceph/ceph-csi/internal/util/log" ) @@ -533,9 +534,13 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent // Generate new volume Handler // The volume handler won't remain same as its contains poolID,clusterID etc // which are not same across clusters. +// nolint:gocyclo,cyclop // TODO: reduce complexity func RegenerateJournal( volumeAttributes map[string]string, - volumeID, requestName, owner string, + claimName, + volumeID, + requestName, + owner string, cr *util.Credentials) (string, error) { ctx := context.Background() var ( @@ -598,16 +603,24 @@ func RegenerateJournal( if err != nil { return "", err } + if imageData != nil { rbdVol.ReservedID = imageData.ImageUUID rbdVol.ImageID = imageData.ImageAttributes.ImageID rbdVol.Owner = imageData.ImageAttributes.Owner + rbdVol.RbdImageName = imageData.ImageAttributes.ImageName if rbdVol.ImageID == "" { err = rbdVol.storeImageID(ctx, j) if err != nil { return "", err } } + // Update Metadata on reattach of the same old PV + parameters := k8s.PrepareVolumeMetadata(claimName, rbdVol.Owner, "") + err = rbdVol.setVolumeMetadata(parameters) + if err != nil { + return "", fmt.Errorf("failed to set volume metadata: %w", err) + } // As the omap already exists for this image ID return nil. 
rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool, rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion) diff --git a/internal/util/k8s/parameters.go b/internal/util/k8s/parameters.go index e5f36cb6b..db2ee82de 100644 --- a/internal/util/k8s/parameters.go +++ b/internal/util/k8s/parameters.go @@ -63,3 +63,19 @@ func GetVolumeMetadata(parameters map[string]string) map[string]string { return newParam } + +// PrepareVolumeMetadata return PV/PVC/PVCNamespace metadata based on inputs. +func PrepareVolumeMetadata(pvcName, pvcNamespace, pvName string) map[string]string { + newParam := map[string]string{} + if pvcName != "" { + newParam[pvcNameKey] = pvcName + } + if pvcNamespace != "" { + newParam[pvcNamespaceKey] = pvcNamespace + } + if pvName != "" { + newParam[pvNameKey] = pvName + } + + return newParam +} From 0ef79c6fc0f6822529a460d29fdc4f220fc43c1f Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Mon, 21 Feb 2022 17:02:38 +0530 Subject: [PATCH 09/20] rbd: set metadata on restart of provisioner pod Make sure to set metadata when image exist, i.e. if the provisioner pod is restarted while createVolume is in progress, say it created the image but didn't yet set the metadata. 
Signed-off-by: Prasanna Kumar Kalever --- internal/rbd/controllerserver.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index c39e943cc..5d99b539f 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -452,6 +452,12 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C } } + // Set metadata on restart of provisioner pod when image exist + err := rbdVol.setVolumeMetadata(req.GetParameters()) + if err != nil { + return nil, err + } + return buildCreateVolumeResponse(req, rbdVol), nil } From 463990c99902c2624426bf7647a63b9bfe86762c Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Tue, 22 Feb 2022 16:43:45 +0530 Subject: [PATCH 10/20] e2e: add test cases for image metadata validation * create a PVC and check PVC/PV metadata on RBD image * create and delete a PVC, attach the old PV to a new PVC and check if PVC metadata is updated on RBD image Signed-off-by: Prasanna Kumar Kalever --- e2e/rbd.go | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) diff --git a/e2e/rbd.go b/e2e/rbd.go index ebd15b1fc..42e8324a5 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -29,6 +29,7 @@ import ( . "github.com/onsi/ginkgo" // nolint v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -89,6 +90,12 @@ var ( nbdMapOptions = "nbd:debug-rbd=20" e2eDefaultCephLogStrategy = "preserve" + + // PV and PVC metadata keys used by external provisioner as part of + // create requests as parameters, when `extra-create-metadata` is true. 
+ pvcNameKey = "csi.storage.k8s.io/pvc/name" + pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" + pvNameKey = "csi.storage.k8s.io/pv/name" ) func deployRBDPlugin() { @@ -394,6 +401,187 @@ var _ = Describe("RBD", func() { } }) } + + By("create a PVC and check PVC/PV metadata on RBD image", func() { + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + pvc.Namespace = f.UniqueName + + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + + imageList, err := listRBDImages(f, defaultRBDPool) + if err != nil { + e2elog.Failf("failed to list rbd images: %v", err) + } + + pvcName, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s/%s %s", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr) + } + pvcName = strings.TrimSuffix(pvcName, "\n") + if pvcName != pvc.Name { + e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName) + } + + pvcNamespace, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s/%s %s", + rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get PVC namespace %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey, err, stdErr) + } + pvcNamespace = strings.TrimSuffix(pvcNamespace, "\n") + if pvcNamespace != pvc.Namespace { + e2elog.Failf("expected pvcNamespace %q got %q", pvc.Namespace, pvcNamespace) + } + + pvcObj, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get( + context.TODO(), + pvc.Name, + metav1.GetOptions{}) + if err != nil { + e2elog.Logf("error getting pvc 
%q in namespace %q: %v", pvc.Name, pvc.Namespace, err) + } + if pvcObj.Spec.VolumeName == "" { + e2elog.Logf("pv name is empty %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) + } + pvName, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s/%s %s", + rbdOptions(defaultRBDPool), imageList[0], pvNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get PV name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], pvNameKey, err, stdErr) + } + pvName = strings.TrimSuffix(pvName, "\n") + if pvName != pvcObj.Spec.VolumeName { + e2elog.Failf("expected pvName %q got %q", pvcObj.Spec.VolumeName, pvName) + } + + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete pvc: %v", err) + } + validateRBDImageCount(f, 0, defaultRBDPool) + }) + + By("reattach the old PV to a new PVC and check if PVC metadata is updated on RBD image", func() { + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + pvc.Namespace = f.UniqueName + + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + + imageList, err := listRBDImages(f, defaultRBDPool) + if err != nil { + e2elog.Failf("failed to list rbd images: %v", err) + } + + pvcName, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s/%s %s", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr) + } + pvcName = strings.TrimSuffix(pvcName, "\n") + if pvcName != pvc.Name { + e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName) + } + + pvcObj, err := 
c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get( + context.TODO(), + pvc.Name, + metav1.GetOptions{}) + if err != nil { + e2elog.Logf("error getting pvc %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) + } + if pvcObj.Spec.VolumeName == "" { + e2elog.Logf("pv name is empty %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) + } + + patchBytes := []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Retain", "claimRef": null}}`) + _, err = c.CoreV1().PersistentVolumes().Patch( + context.TODO(), + pvcObj.Spec.VolumeName, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}) + if err != nil { + e2elog.Logf("error Patching PV %q for persistentVolumeReclaimPolicy and claimRef: %v", + pvcObj.Spec.VolumeName, err) + } + + err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete( + context.TODO(), + pvc.Name, + metav1.DeleteOptions{}) + if err != nil { + e2elog.Logf("failed to delete pvc: %w", err) + } + + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + + pvc.Name = "rbd-pvc-new" + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create new PVC: %v", err) + } + + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + + pvcName, stdErr, err = execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s/%s %s", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr) + } + pvcName = strings.TrimSuffix(pvcName, "\n") + if pvcName != pvc.Name { + e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName) + } + + patchBytes = []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Delete"}}`) + _, err = c.CoreV1().PersistentVolumes().Patch( + context.TODO(), + pvcObj.Spec.VolumeName, + types.StrategicMergePatchType, + patchBytes, + 
metav1.PatchOptions{}) + if err != nil { + e2elog.Logf("error Patching PV %q for persistentVolumeReclaimPolicy: %v", pvcObj.Spec.VolumeName, err) + } + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete pvc: %v", err) + } + validateRBDImageCount(f, 0, defaultRBDPool) + }) + By("verify generic ephemeral volume support", func() { // generic ephemeral volume support is supported from 1.21 if k8sVersionGreaterEquals(f.ClientSet, 1, 21) { From 41fe2c7ddadef91617029604d5d47ca84aeb37f7 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Wed, 2 Mar 2022 14:42:22 +0530 Subject: [PATCH 11/20] rbd: set metadata on the snapshot Set snapshot-name/snapshot-namespace/snapshotcontent-name details on RBD backend snapshot image as metadata on snapshot Signed-off-by: Prasanna Kumar Kalever --- internal/rbd/controllerserver.go | 25 +++++++++++++++++++++++-- internal/rbd/rbd_util.go | 13 +++++++++++++ internal/util/k8s/parameters.go | 21 +++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 5d99b539f..315e18710 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -963,6 +963,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities( } // CreateSnapshot creates the snapshot in backend and stores metadata in store. 
+// nolint:cyclop // TODO: reduce complexity func (cs *ControllerServer) CreateSnapshot( ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { @@ -1036,7 +1037,7 @@ func (cs *ControllerServer) CreateSnapshot( return nil, status.Errorf(codes.Internal, err.Error()) } if found { - return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr) + return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr, req.GetParameters()) } err = flattenTemporaryClonedImages(ctx, rbdVol, cr) @@ -1062,6 +1063,16 @@ func (cs *ControllerServer) CreateSnapshot( return nil, status.Error(codes.Internal, err.Error()) } + // Update the metadata on snapshot not on the original image + rbdVol.RbdImageName = rbdSnap.RbdSnapName + + // Set snapshot-name/snapshot-namespace/snapshotcontent-name details + // on RBD backend image as metadata on create + err = rbdVol.setSnapshotMetadata(req.GetParameters()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ SizeBytes: vol.VolSize, @@ -1079,7 +1090,8 @@ func cloneFromSnapshot( ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, - cr *util.Credentials) (*csi.CreateSnapshotResponse, error) { + cr *util.Credentials, + parameters map[string]string) (*csi.CreateSnapshotResponse, error) { vol := generateVolFromSnap(rbdSnap) err := vol.Connect(cr) if err != nil { @@ -1112,6 +1124,15 @@ func cloneFromSnapshot( return nil, status.Errorf(codes.Internal, err.Error()) } + // Update snapshot-name/snapshot-namespace/snapshotcontent-name details on + // RBD backend image as metadata on restart of provisioner pod when image exist + if len(parameters) != 0 { + err = rbdVol.setSnapshotMetadata(parameters) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + return &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ SizeBytes: rbdSnap.VolSize, diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 
c0cdfe796..7f00cfe18 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -1933,3 +1933,16 @@ func (rv *rbdVolume) setVolumeMetadata(parameters map[string]string) error { return nil } + +// setSnapshotMetadata Set snapshot-name/snapshot-namespace/snapshotcontent-name metadata +// on RBD image. +func (rv *rbdVolume) setSnapshotMetadata(parameters map[string]string) error { + for k, v := range k8s.GetSnapshotMetadata(parameters) { + err := rv.SetMetadata(k, v) + if err != nil { + return fmt.Errorf("failed to set metadata key %q, value %q on image: %w", k, v, err) + } + } + + return nil +} diff --git a/internal/util/k8s/parameters.go b/internal/util/k8s/parameters.go index db2ee82de..f3d8fe894 100644 --- a/internal/util/k8s/parameters.go +++ b/internal/util/k8s/parameters.go @@ -29,6 +29,11 @@ const ( pvcNameKey = "csi.storage.k8s.io/pvc/name" pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" pvNameKey = "csi.storage.k8s.io/pv/name" + + // snapshot metadata keys. + volSnapNameKey = "csi.storage.k8s.io/volumesnapshot/name" + volSnapNamespaceKey = "csi.storage.k8s.io/volumesnapshot/namespace" + volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name" ) // RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix. @@ -79,3 +84,19 @@ func PrepareVolumeMetadata(pvcName, pvcNamespace, pvName string) map[string]stri return newParam } + +// GetSnapshotMetadata filter parameters, only return +// snapshot-name/snapshot-namespace/snapshotcontent-name metadata. 
+func GetSnapshotMetadata(parameters map[string]string) map[string]string { + keys := []string{volSnapNameKey, volSnapNamespaceKey, volSnapContentNameKey} + newParam := map[string]string{} + for k, v := range parameters { + for _, key := range keys { + if strings.Contains(k, key) { + newParam[k] = v + } + } + } + + return newParam +} From d870cb152a00e6b2173c1a1fe7acfe89b2df2227 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Fri, 4 Mar 2022 11:05:42 +0530 Subject: [PATCH 12/20] deploy: add --extra-create-metadata arg to csi-snapshotter sidecar This argument in csi-snapshotter sidecar allows us to receive snapshot-name/snapshot-namespace/snapshotcontent-name metadata in the CreateSnapshot() request. For ex: csi.storage.k8s.io/volumesnapshot/name csi.storage.k8s.io/volumesnapshot/namespace csi.storage.k8s.io/volumesnapshotcontent/name This is a useful information which can be used depend on the use case we have at our driver. The features like adding metadata to snapshot image can consume this based on the need. 
Signed-off-by: Prasanna Kumar Kalever --- charts/ceph-csi-rbd/templates/provisioner-deployment.yaml | 1 + deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml index b3b09160d..482aecfca 100644 --- a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml @@ -102,6 +102,7 @@ spec: - "--v={{ .Values.logLevel }}" - "--timeout={{ .Values.provisioner.timeout }}" - "--leader-election=true" + - "--extra-create-metadata=true" env: - name: ADDRESS value: "unix:///csi/{{ .Values.provisionerSocketFile }}" diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml index 6c70f7ec5..ce364636a 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml @@ -73,6 +73,7 @@ spec: - "--v=5" - "--timeout=150s" - "--leader-election=true" + - "--extra-create-metadata=true" env: - name: ADDRESS value: unix:///csi/csi-provisioner.sock From 4100ad24061670c44fe94f1a85de8dcd34d237c9 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Wed, 2 Mar 2022 15:10:32 +0530 Subject: [PATCH 13/20] e2e: add test case for snapshot metadata validation Signed-off-by: Prasanna Kumar Kalever --- e2e/rbd.go | 122 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 113 insertions(+), 9 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index 42e8324a5..a8a7d8c55 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -96,6 +96,11 @@ var ( pvcNameKey = "csi.storage.k8s.io/pvc/name" pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace" pvNameKey = "csi.storage.k8s.io/pv/name" + + // snapshot metadata keys. 
+ volSnapNameKey = "csi.storage.k8s.io/volumesnapshot/name" + volSnapNamespaceKey = "csi.storage.k8s.io/volumesnapshot/namespace" + volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name" ) func deployRBDPlugin() { @@ -422,7 +427,7 @@ var _ = Describe("RBD", func() { } pvcName, stdErr, err := execCommandInToolBoxPod(f, - fmt.Sprintf("rbd image-meta get %s/%s %s", + fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), rookNamespace) if err != nil || stdErr != "" { @@ -435,7 +440,7 @@ var _ = Describe("RBD", func() { } pvcNamespace, stdErr, err := execCommandInToolBoxPod(f, - fmt.Sprintf("rbd image-meta get %s/%s %s", + fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey), rookNamespace) if err != nil || stdErr != "" { @@ -458,7 +463,7 @@ var _ = Describe("RBD", func() { e2elog.Logf("pv name is empty %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) } pvName, stdErr, err := execCommandInToolBoxPod(f, - fmt.Sprintf("rbd image-meta get %s/%s %s", + fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(defaultRBDPool), imageList[0], pvNameKey), rookNamespace) if err != nil || stdErr != "" { @@ -497,7 +502,7 @@ var _ = Describe("RBD", func() { } pvcName, stdErr, err := execCommandInToolBoxPod(f, - fmt.Sprintf("rbd image-meta get %s/%s %s", + fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), rookNamespace) if err != nil || stdErr != "" { @@ -543,8 +548,10 @@ var _ = Describe("RBD", func() { // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) - pvc.Name = "rbd-pvc-new" - err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + pvcObj.Name = "rbd-pvc-new" + // unset the resource version as should not be set on objects to be created + pvcObj.ResourceVersion = "" + err = createPVCAndvalidatePV(f.ClientSet, pvcObj, deployTimeout) if err != nil { 
e2elog.Failf("failed to create new PVC: %v", err) } @@ -553,7 +560,7 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, 1, defaultRBDPool) pvcName, stdErr, err = execCommandInToolBoxPod(f, - fmt.Sprintf("rbd image-meta get %s/%s %s", + fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(defaultRBDPool), imageList[0], pvcNameKey), rookNamespace) if err != nil || stdErr != "" { @@ -561,8 +568,8 @@ var _ = Describe("RBD", func() { rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr) } pvcName = strings.TrimSuffix(pvcName, "\n") - if pvcName != pvc.Name { - e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName) + if pvcName != pvcObj.Name { + e2elog.Failf("expected pvcName %q got %q", pvcObj.Name, pvcName) } patchBytes = []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Delete"}}`) @@ -575,6 +582,103 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Logf("error Patching PV %q for persistentVolumeReclaimPolicy: %v", pvcObj.Spec.VolumeName, err) } + err = deletePVCAndValidatePV(f.ClientSet, pvcObj, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete pvc: %v", err) + } + validateRBDImageCount(f, 0, defaultRBDPool) + }) + + By("create a snapshot and check metadata on RBD snapshot image", func() { + err := createRBDSnapshotClass(f) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + defer func() { + err = deleteRBDSnapshotClass() + if err != nil { + e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err) + } + }() + + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + e2elog.Failf("failed to create pvc and application binding: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + // delete pod as we should not create snapshot for in-use pvc + err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete application: %v", err) 
+ } + + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + + err = createSnapshot(&snap, deployTimeout) + if err != nil { + e2elog.Failf("failed to create snapshot: %v", err) + } + // validate created backend rbd images + // parent PVC + snapshot + totalImages := 2 + validateRBDImageCount(f, totalImages, defaultRBDPool) + + imageList, err := listRBDImages(f, defaultRBDPool) + if err != nil { + e2elog.Failf("failed to list rbd images: %v", err) + } + + volSnapName, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s --image=%s %s", + rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get volume snapshot name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey, err, stdErr) + } + volSnapName = strings.TrimSuffix(volSnapName, "\n") + if volSnapName != snap.Name { + e2elog.Failf("expected volSnapName %q got %q", snap.Name, volSnapName) + } + + volSnapNamespace, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s --image=%s %s", + rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get volume snapshot namespace %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey, err, stdErr) + } + volSnapNamespace = strings.TrimSuffix(volSnapNamespace, "\n") + if volSnapNamespace != snap.Namespace { + e2elog.Failf("expected volSnapNamespace %q got %q", snap.Namespace, volSnapNamespace) + } + + content, err := getVolumeSnapshotContent(snap.Namespace, snap.Name) + if err != nil { + e2elog.Failf("failed to get snapshotcontent for %s in namespace %s: %v", + snap.Name, snap.Namespace, err) + } + volSnapContentName, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rbd image-meta get %s --image=%s %s", + 
rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey), + rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("failed to get snapshotcontent name %s/%s %s: err=%v stdErr=%q", + rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey, err, stdErr) + } + volSnapContentName = strings.TrimSuffix(volSnapContentName, "\n") + if volSnapContentName != content.Name { + e2elog.Failf("expected volSnapContentName %q got %q", content.Name, volSnapContentName) + } + + err = deleteSnapshot(&snap, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete snapshot: %v", err) + } err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to delete pvc: %v", err) From 959df4dbac9c23f8b302b768855e2ca21ae60afe Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Thu, 7 Apr 2022 17:04:25 +0530 Subject: [PATCH 14/20] doc: correct typos in struct field comments and release.md corrected strings in the release guide and util server. Signed-off-by: Humble Chirammal --- docs/development-guide.md | 8 ++++---- docs/metrics.md | 6 +++--- docs/releases.md | 2 +- internal/util/util.go | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/development-guide.md b/docs/development-guide.md index 6ab863749..05dc6b1df 100644 --- a/docs/development-guide.md +++ b/docs/development-guide.md @@ -25,7 +25,7 @@ it is **highly** encouraged to: package](https://github.com/ceph/go-ceph). It is required to install the Ceph C headers in order to compile Ceph-CSI. The packages are called `librados-devel` and `librbd-devel` on many Linux distributions. See the - [go-ceph installaton + [go-ceph installation instructions](https://github.com/ceph/go-ceph#installation) for more details. 
* Run @@ -169,7 +169,7 @@ The `component` in the subject of the commit message can be one of the following * `doc`: documentation updates * `util`: utilities shared between components use `cephfs` or `rbd` if the change is only relevant for one of the type of storage -* `journal`: any of the journalling functionalities +* `journal`: any of the journaling functionalities * `helm`: deployment changes for the Helm charts * `deploy`: updates to Kubernetes templates for deploying components * `build`: anything related to building Ceph-CSI, the executable or container @@ -312,12 +312,12 @@ Right now, we also have below commands to manually retrigger the CI jobs **Caution**: Please do not retrigger the CI jobs without an understanding of the root cause, because: -* We may miss some of the important corner cases which are true negatives, +* We may miss some important corner cases which are true negatives, and hard to reproduce * Retriggering jobs for known failures can unnecessarily put CI resources under pressure -Hence it is recommended that you please go through the CI logs first, if you +Hence, it is recommended that you please go through the CI logs first, if you are certain about the flaky test failure behavior, then comment on the PR indicating the logs about a particular test that went flaky and use the appropriate command to retrigger the job[s]. diff --git a/docs/metrics.md b/docs/metrics.md index 530a20b4e..1f162daf4 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -26,10 +26,10 @@ Promethues can be deployed through the promethues operator described [here](http The [service-monitor](../examples/service-monitor.yaml) will tell promethues how to pull metrics out of CSI. -Each CSI pod has a service to expose the endpoint to prometheus. By default rbd +Each CSI pod has a service to expose the endpoint to prometheus. By default, rbd pods run on port 8080 and cephfs 8081. 
These can be changed if desired or if multiple ceph clusters are deployed more ports will be used for additional CSI pods. -Note: You may need to open the ports used in your firewall depending on how you -cluster is setup. +Note: You may need to open the ports used in your firewall depending on how your +cluster is set up. diff --git a/docs/releases.md b/docs/releases.md index 2c08b98ff..2e0e39441 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -41,7 +41,7 @@ and it must be backward compatible. - When `MAJOR` increases, the new release adds **new features, bug fixes, or both** and which *changes the behavior from - the previous release* (may be backward incompatible). + the previous release* (may be backward incompatible). ## Tagging repositories diff --git a/internal/util/util.go b/internal/util/util.go index c67fd0570..b2fda13a8 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -90,7 +90,7 @@ type Config struct { EnableGRPCMetrics bool // option to enable grpc metrics EnableProfiling bool // flag to enable profiling - IsControllerServer bool // if set to true start provisoner server + IsControllerServer bool // if set to true start provisioner server IsNodeServer bool // if set to true start node server Version bool // cephcsi version @@ -164,7 +164,7 @@ type KernelVersion struct { SubLevel int ExtraVersion int // prefix of the part after the first "-" Distribution string // component of full extraversion - Backport bool // backports have a fixed version/patchlevel/sublevel + Backport bool // backports have a fixed version/patchlevel/sublevel } // parseKernelRelease parses a kernel release version string into: @@ -202,9 +202,9 @@ func parseKernelRelease(release string) (int, int, int, int, error) { // CheckKernelSupport checks the running kernel and comparing it to known // versions that have support for required features . Distributors of -enterprise Linux have backported quota support to previous versions. 
This +// enterprise Linux have backport quota support to previous versions. This // function checks if the running kernel is one of the versions that have the -// feature/fixes backported. +// feature/fixes backport. // // `uname -r` (or Uname().Utsname.Release has a format like 1.2.3-rc.vendor // This can be slit up in the following components: - version (1) - patchlevel From 9b8bcbd874cbb9da963a0eb164eadd8aad62b8ad Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 11 Apr 2022 13:50:27 +0530 Subject: [PATCH 15/20] e2e: correct the cephfs fuse verification podnames The podnames were wrongly mention in the e2e tests and this correct the same. Signed-off-by: Humble Chirammal --- e2e/cephfs.go | 8 ++++---- e2e/kms.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index b59827b9d..4b42b75a1 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -456,7 +456,7 @@ var _ = Describe("cephfs", func() { // clean up after ourselves err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC: %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) err = deleteResource(cephFSExamplePath + "storageclass.yaml") @@ -575,7 +575,7 @@ var _ = Describe("cephfs", func() { if err != nil { e2elog.Failf("killall command failed: err %v, stderr %s", err, stdErr) } - // Verify Pod podName2 that stat()-ing the mountpoint results in ENOTCONN. + // Verify Pod pod2Name that stat()-ing the mountpoint results in ENOTCONN. stdErr, err = doStat(pod2Name) if err == nil || !strings.Contains(stdErr, "not connected") { e2elog.Failf( @@ -583,7 +583,7 @@ var _ = Describe("cephfs", func() { err, stdErr, ) } - // Delete podName2 Pod. This serves two purposes: it verifies that deleting pods with + // Delete pod2Name Pod. 
This serves two purposes: it verifies that deleting pods with // corrupted ceph-fuse mountpoints works, and it lets the replicaset controller recreate // the pod with hopefully mounts working again. err = deletePod(pod2Name, depl.Namespace, c, deployTimeout) @@ -616,7 +616,7 @@ var _ = Describe("cephfs", func() { } e2elog.Failf("no new replica found ; found replicas %v", podNames) } - // Verify Pod podName3 has its ceph-fuse mount working again. + // Verify Pod pod2Name has its ceph-fuse mount working again. err = ensureStatSucceeds(pod2Name) if err != nil { e2elog.Failf(err.Error()) diff --git a/e2e/kms.go b/e2e/kms.go index 0d3ae9655..7cf1a74e0 100644 --- a/e2e/kms.go +++ b/e2e/kms.go @@ -31,7 +31,7 @@ const ( // kmsConfig is an interface that should be used when passing a configuration // for a KMS to validation functions. This allows the validation functions to -// work independently from the actual KMS. +// work independently of the actual KMS. type kmsConfig interface { canGetPassphrase() bool getPassphrase(f *framework.Framework, key string) (string, string) From a44a97f3c56426d420fdb790c68d06545c4f1657 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 11 Apr 2022 14:13:21 +0530 Subject: [PATCH 16/20] deploy: snapshot deployment is no longer beta Considering snapshot controllers have been moved to GA since kube version 1.20, we no longer need to have a mention of beta version of the same in our deployment. Signed-off-by: Humble Chirammal --- e2e/README.md | 24 ------------------------ scripts/install-snapshot.sh | 2 +- 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/e2e/README.md b/e2e/README.md index 4d44ba8f5..2b7b73772 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -106,30 +106,6 @@ are available while running tests: After the support for snapshot/clone has been added to ceph-csi, you need to follow these steps before running e2e. 
-Please note that the snapshot operation works only if the Kubernetes version -is greater than or equal to 1.17.0. - -- Delete Alpha snapshot CRD created by ceph-csi in rook. - - Check if you have any `v1alpha1` CRD created in our Kubernetes cluster - - ```bash - $ kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io -o yaml |grep v1alpha1 - - name: v1alpha1 - - v1alpha1 - $ kubectl get crd volumesnapshotcontents.snapshot.storage.k8s.io -o yaml |grep v1alpha1 - - name: v1alpha1 - - v1alpha1 - $ kubectl get crd volumesnapshots.snapshot.storage.k8s.io -o yaml |grep v1alpha1 - - name: v1alpha1 - - v1alpha1 - ``` - - - If you have Alpha CRD, delete it as from Kubernetes 1.17.0+ the snapshot - should be `v1beta1` - - ```console - ./scripts/install-snapshot.sh delete-crd - ``` - Install snapshot controller and Beta snapshot CRD diff --git a/scripts/install-snapshot.sh b/scripts/install-snapshot.sh index 31c661539..8ba636608 100755 --- a/scripts/install-snapshot.sh +++ b/scripts/install-snapshot.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# This script can be used to install/delete snapshotcontroller and snapshot beta CRD +# This script can be used to install/delete snapshotcontroller and snapshot CRD SCRIPT_DIR="$(dirname "${0}")" From 539050a85741ec8d602aa6bf7d9d5df75c7dbd01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Apr 2022 14:26:28 +0000 Subject: [PATCH 17/20] rebase: bump github.com/aws/aws-sdk-go-v2/service/sts Bumps [github.com/aws/aws-sdk-go-v2/service/sts](https://github.com/aws/aws-sdk-go-v2) from 1.16.0 to 1.16.3. 
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.16.0...service/efs/v1.16.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/sts dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 24 ++-- .../aws/aws-sdk-go-v2/aws/credential_cache.go | 133 ++++++++++++++---- .../aws/aws-sdk-go-v2/aws/credentials.go | 12 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 12 ++ .../configsources/go_module_metadata.go | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 12 ++ .../endpoints/v2/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/internal/rand/rand.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 12 ++ .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 12 ++ .../service/sts/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 4 + .../aws/smithy-go/encoding/httpbinding/uri.go | 3 + .../aws/smithy-go/go_module_metadata.go | 2 +- vendor/modules.txt | 12 +- 18 files changed, 200 insertions(+), 62 deletions(-) diff --git a/go.mod b/go.mod index 0b6a4779b..a9e7dcc50 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/IBM/keyprotect-go-client v0.7.0 github.com/aws/aws-sdk-go v1.43.32 - github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/go-ceph v0.14.0 github.com/container-storage-interface/spec v1.5.0 @@ -44,11 +44,11 @@ require ( require ( github.com/armon/go-metrics v0.3.9 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect - github.com/aws/smithy-go v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.16.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect + github.com/aws/smithy-go v1.11.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.2.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect diff --git a/go.sum b/go.sum index 565ff6f73..09ee4de5e 100644 --- a/go.sum +++ b/go.sum @@ -140,18 +140,18 @@ github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9 github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.43.32 h1:b2NQnfWfImfo7yzXq6gzXEC+6s5v1t2RU3G9o+VirYo= github.com/aws/aws-sdk-go v1.43.32/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js= -github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s= -github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= -github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= +github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= +github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go index 1411a5c32..dfd2b1ddb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -2,6 +2,7 @@ package aws import ( "context" + "fmt" "sync/atomic" "time" @@ -24,11 +25,13 @@ type CredentialsCacheOptions struct { // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration - // ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials - // within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0. + // ExpiryWindowJitterFrac provides a mechanism for randomizing the + // expiration of credentials within the configured ExpiryWindow by a random + // percentage. Valid values are between 0.0 and 1.0. // - // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to - // expire between 30 to 60 seconds prior to their actual expiration time. + // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac + // is 0.5 then credentials will be set to expire between 30 to 60 seconds + // prior to their actual expiration time. // // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored. // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window. @@ -39,8 +42,19 @@ type CredentialsCacheOptions struct { // CredentialsCache provides caching and concurrency safe credentials retrieval // via the provider's retrieve method. +// +// CredentialsCache will look for optional interfaces on the Provider to adjust +// how the credential cache handles credentials caching. +// +// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle +// credential refresh failures. This could return an updated Credentials +// value, or attempt another means of retrieving credentials. 
+// +// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how +// credentials Expires is modified. This could modify how the Credentials +// Expires is adjusted based on the CredentialsCache ExpiryWindow option. +// Such as providing a floor not to reduce the Expires below. type CredentialsCache struct { - // provider is the CredentialProvider implementation to be wrapped by the CredentialCache. provider CredentialsProvider options CredentialsCacheOptions @@ -48,8 +62,9 @@ type CredentialsCache struct { sf singleflight.Group } -// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic -// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for +// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider +// is expected to not be nil. A variadic list of one or more functions can be +// provided to modify the CredentialsCache configuration. This allows for // configuration of credential expiry window and jitter. func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache { options := CredentialsCacheOptions{} @@ -81,8 +96,8 @@ func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *C // // Returns and error if the provider's retrieve method returns an error. 
func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { - if creds := p.getCreds(); creds != nil { - return *creds, nil + if creds, ok := p.getCreds(); ok && !creds.Expired() { + return creds, nil } resCh := p.sf.DoChan("", func() (interface{}, error) { @@ -97,39 +112,64 @@ func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { } func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) { - if creds := p.getCreds(); creds != nil { - return *creds, nil + currCreds, ok := p.getCreds() + if ok && !currCreds.Expired() { + return currCreds, nil } - creds, err := p.provider.Retrieve(ctx) - if err == nil { - if creds.CanExpire { - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return Credentials{}, err - } - jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) - creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter)) + newCreds, err := p.provider.Retrieve(ctx) + if err != nil { + handleFailToRefresh := defaultHandleFailToRefresh + if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok { + handleFailToRefresh = cs.HandleFailToRefresh + } + newCreds, err = handleFailToRefresh(ctx, currCreds, err) + if err != nil { + return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err) + } + } + + if newCreds.CanExpire && p.options.ExpiryWindow > 0 { + adjustExpiresBy := defaultAdjustExpiresBy + if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok { + adjustExpiresBy = cs.AdjustExpiresBy } - p.creds.Store(&creds) + randFloat64, err := sdkrand.CryptoRandFloat64() + if err != nil { + return Credentials{}, fmt.Errorf("failed to get random provider, %w", err) + } + + var jitter time.Duration + if p.options.ExpiryWindowJitterFrac > 0 { + jitter = time.Duration(randFloat64 * + p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) + } + + newCreds, err = 
adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter)) + if err != nil { + return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err) + } } - return creds, err + p.creds.Store(&newCreds) + return newCreds, nil } -func (p *CredentialsCache) getCreds() *Credentials { +// getCreds returns the currently stored credentials and true. Returning false +// if no credentials were stored. +func (p *CredentialsCache) getCreds() (Credentials, bool) { v := p.creds.Load() if v == nil { - return nil + return Credentials{}, false } c := v.(*Credentials) - if c != nil && c.HasKeys() && !c.Expired() { - return c + if c == nil || !c.HasKeys() { + return Credentials{}, false } - return nil + return *c, true } // Invalidate will invalidate the cached credentials. The next call to Retrieve @@ -137,3 +177,42 @@ func (p *CredentialsCache) getCreds() *Credentials { func (p *CredentialsCache) Invalidate() { p.creds.Store((*Credentials)(nil)) } + +// HandleFailRefreshCredentialsCacheStrategy is an interface for +// CredentialsCache to allow CredentialsProvider how failed to refresh +// credentials is handled. +type HandleFailRefreshCredentialsCacheStrategy interface { + // Given the previously cached Credentials, if any, and refresh error, may + // returns new or modified set of Credentials, or error. + // + // Credential caches may use default implementation if nil. + HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error) +} + +// defaultHandleFailToRefresh returns the passed in error. +func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) { + return Credentials{}, err +} + +// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache +// to allow CredentialsProvider to intercept adjustments to Credentials expiry +// based on expectations and use cases of CredentialsProvider. +// +// Credential caches may use default implementation if nil. 
+type AdjustExpiresByCredentialsCacheStrategy interface { + // Given a Credentials as input, applying any mutations and + // returning the potentially updated Credentials, or error. + AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) +} + +// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, +// and returns the updated credentials value. If Credentials value's CanExpire +// is false, the passed in credentials are returned unchanged. +func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { + if !creds.CanExpire { + return creds, nil + } + + creds.Expires = creds.Expires.Add(dur) + return creds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index ce3868a9f..0fffc53e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -83,16 +83,20 @@ type Credentials struct { // Source of the credentials Source string - // Time the credentials will expire. + // States if the credentials can expire or not. CanExpire bool - Expires time.Time + + // The time the credentials will expire at. Should be ignored if CanExpire + // is false. + Expires time.Time } // Expired returns if the credentials have expired. func (v Credentials) Expired() bool { if v.CanExpire { - // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry - // time is always based on reported wall-clock time. + // Calling Round(0) on the current time will truncate the monotonic + // reading only. Ensures credential expiry time is always based on + // reported wall-clock time. 
return !v.Expires.After(sdk.NowTime().Round(0)) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 303e48c5f..f12e3356f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.0" +const goModuleVersion = "1.16.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 1ac06cf74..856aec912 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.1.9 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.8 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.7 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.6 (2022-03-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 04fe4f6b5..d17db4207 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.6" +const goModuleVersion = "1.1.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 790b2d203..3473a9823 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,15 @@ +# v2.4.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.0 (2022-03-08) * **Feature**: Updated `github.com/aws/smithy-go` to latest version diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 71b4d202a..88f74540a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.0" +const goModuleVersion = "2.4.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go index 9791ea590..c8484dcd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go @@ -29,5 +29,5 @@ func Float64(reader io.Reader) (float64, error) { // CryptoRandFloat64 returns a random float64 obtained from the crypto rand // source. 
func CryptoRandFloat64() (float64, error) { - return Float64(rand.Reader) + return Float64(Reader) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index fe3f378d1..d779c398d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.9.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.0 (2022-03-08) * **Feature**: Updated `github.com/aws/smithy-go` to latest version diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 1734a92c3..f30bfff65 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.0" +const goModuleVersion = "1.9.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 5f7a8af89..6ad12851a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + # v1.16.0 (2022-03-08) * **Feature**: Updated `github.com/aws/smithy-go` to latest version diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 9af7dc268..c81e72b70 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.0" +const goModuleVersion = "1.16.3" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index c1daf6741..a5b73cf60 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,7 @@ +# Release (v1.11.2) + +* No change notes available for this release. + # Release (v1.11.1) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go index 64e40121e..f04e11984 100644 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go @@ -20,6 +20,9 @@ func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIV func (u URIValue) modifyURI(value string) (err error) { *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) + if err != nil { + return err + } *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) return err } diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index c1e70fc68..7e252ec8c 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ 
package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.1" +const goModuleVersion = "1.11.2" diff --git a/vendor/modules.txt b/vendor/modules.txt index 60e77b432..f8028e68f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -53,7 +53,7 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.15.0 +# github.com/aws/aws-sdk-go-v2 v1.16.2 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -70,21 +70,21 @@ github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 +# github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.11.1 +# github.com/aws/smithy-go v1.11.2 ## explicit; go 1.15 github.com/aws/smithy-go github.com/aws/smithy-go/document From dffb6e72c2ab48d7b90197e405ae0d3d76a5fbc0 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 11 Apr 
2022 13:39:14 +0530 Subject: [PATCH 18/20] rbd: check nbd tool features only for rbd driver calling setRbdNbdToolFeatures inside an init gets called in main.go for both cephfs and rbd driver. instead of calling it in init function calling this in rbd driver.go as this is specific to rbd. Signed-off-by: Madhu Rajanna --- internal/rbd/driver/driver.go | 2 ++ internal/rbd/rbd_attach.go | 9 +++------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/internal/rbd/driver/driver.go b/internal/rbd/driver/driver.go index bc41c8d63..9cff40f3d 100644 --- a/internal/rbd/driver/driver.go +++ b/internal/rbd/driver/driver.go @@ -153,6 +153,8 @@ func (r *Driver) Run(conf *util.Config) { log.FatalLogMsg(err.Error()) } rbd.SetGlobalInt("krbdFeatures", krbdFeatures) + + rbd.SetRbdNbdToolFeatures() } if conf.IsControllerServer { diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index e2cddef50..7afaede51 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -92,10 +92,6 @@ var ( } ) -func init() { - setRbdNbdToolFeatures() -} - // rbdDeviceInfo strongly typed JSON spec for rbd device list output (of type krbd). type rbdDeviceInfo struct { ID string `json:"id"` @@ -216,8 +212,9 @@ func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries return "", false } -// set features available with rbd-nbd, and NBD module loaded status. -func setRbdNbdToolFeatures() { +// SetRbdNbdToolFeatures sets features available with rbd-nbd, and NBD module +// loaded status. +func SetRbdNbdToolFeatures() { var stderr string // check if the module is loaded or compiled in _, err := os.Stat(fmt.Sprintf("/sys/module/%s", moduleNbd)) From a6a1a7775eaf576163e6aa35d1a6df8f25954893 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 11 Apr 2022 14:45:13 +0530 Subject: [PATCH 19/20] doc: mention csi string in flag arguments correctly This commit correct the csi string mention in the documentation. 
Signed-off-by: Humble Chirammal --- e2e/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/e2e/README.md b/e2e/README.md index 2b7b73772..6635b87c5 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -90,12 +90,12 @@ are available while running tests: | flag | description | | ----------------- | ----------------------------------------------------------------------------- | | deploy-timeout | Timeout to wait for created kubernetes resources (default: 10 minutes) | -| deploy-cephfs | Deploy cephFS csi driver as part of E2E (default: true) | -| deploy-rbd | Deploy rbd csi driver as part of E2E (default: true) | -| test-cephfs | Test cephFS csi driver as part of E2E (default: true) | +| deploy-cephfs | Deploy cephFS CSI driver as part of E2E (default: true) | +| deploy-rbd | Deploy rbd CSI driver as part of E2E (default: true) | +| test-cephfs | Test cephFS CSI driver as part of E2E (default: true) | | upgrade-testing | Perform upgrade testing (default: false) | | upgrade-version | Target version for upgrade testing (default: "v3.3.1") | -| test-rbd | Test rbd csi driver as part of E2E (default: true) | +| test-rbd | Test rbd CSI driver as part of E2E (default: true) | | cephcsi-namespace | The namespace in which cephcsi driver will be created (default: "default") | | rook-namespace | The namespace in which rook operator is installed (default: "rook-ceph") | | kubeconfig | Path to kubeconfig containing embedded authinfo (default: $HOME/.kube/config) | From 6043a308770316f13156583a8394146fba5a7445 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 11 Apr 2022 15:21:12 +0530 Subject: [PATCH 20/20] e2e: change the default release version of upgrade tests This commit correct the release version of upgrade tests from unsupported 3.3.1 to supported version. 
Signed-off-by: Humble Chirammal --- e2e/README.md | 2 +- e2e/e2e_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/README.md b/e2e/README.md index 6635b87c5..8dccda124 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -94,7 +94,7 @@ are available while running tests: | deploy-rbd | Deploy rbd CSI driver as part of E2E (default: true) | | test-cephfs | Test cephFS CSI driver as part of E2E (default: true) | | upgrade-testing | Perform upgrade testing (default: false) | -| upgrade-version | Target version for upgrade testing (default: "v3.3.1") | +| upgrade-version | Target version for upgrade testing (default: "v3.5.1") | | test-rbd | Test rbd CSI driver as part of E2E (default: true) | | cephcsi-namespace | The namespace in which cephcsi driver will be created (default: "default") | | rook-namespace | The namespace in which rook operator is installed (default: "rook-ceph") | diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index b53f09ce5..f9dc4a0a6 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -40,7 +40,7 @@ func init() { flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm") flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing") - flag.StringVar(&upgradeVersion, "upgrade-version", "v3.3.1", "target version for upgrade testing") + flag.StringVar(&upgradeVersion, "upgrade-version", "v3.5.1", "target version for upgrade testing") flag.StringVar(&cephCSINamespace, "cephcsi-namespace", defaultNs, "namespace in which cephcsi deployed") flag.StringVar(&rookNamespace, "rook-namespace", "rook-ceph", "namespace in which rook is deployed") setDefaultKubeconfig()