From ac09c5553c652f4d788a57b02013d0b08c0962d8 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Mon, 25 Nov 2019 16:39:24 +0530
Subject: [PATCH] Add E2E for cephfs resize functionality

Signed-off-by: Humble Chirammal
---
 Gopkg.lock                                    |   1 +
 .../templates/provisioner-deployment.yaml     |  19 +++
 .../provisioner-rules-clusterrole.yaml        |   9 +-
 charts/ceph-csi-cephfs/values.yaml            |   2 +-
 charts/ceph-csi-rbd/values.yaml               |  12 --
 .../v1.13/csi-provisioner-rbac.yaml           |   1 -
 .../v1.14+/csi-cephfsplugin-provisioner.yaml  |  15 ++-
 .../v1.14+/csi-provisioner-rbac.yaml          |   5 +-
 e2e/cephfs.go                                 |  17 +++
 e2e/utils.go                                  | 110 +++++++++++++++++-
 examples/cephfs/storageclass.yaml             |   3 +
 pkg/cephfs/controllerserver.go                |   7 ++
 scripts/minikube.sh                           |   2 +-
 13 files changed, 184 insertions(+), 19 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 1ac3753f1..1bf8f2a46 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1374,6 +1374,7 @@
     "k8s.io/api/core/v1",
     "k8s.io/api/storage/v1",
     "k8s.io/apimachinery/pkg/api/errors",
+    "k8s.io/apimachinery/pkg/api/resource",
     "k8s.io/apimachinery/pkg/apis/meta/v1",
     "k8s.io/apimachinery/pkg/util/sets",
     "k8s.io/apimachinery/pkg/util/validation",
diff --git a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
index d69c9682b..ebb1de53f 100644
--- a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
+++ b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
@@ -63,6 +63,25 @@ spec:
               mountPath: /csi
           resources:
 {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
+{{- end }}
+{{- if semverCompare ">=1.15" .Capabilities.KubeVersion.GitVersion -}}
+{{- if .Values.provisioner.resizer.enabled }}
+        - name: csi-resizer
+          image: "{{ .Values.provisioner.resizer.image.repository }}:{{ .Values.provisioner.resizer.image.tag }}"
+          imagePullPolicy: {{ .Values.provisioner.resizer.image.pullPolicy }}
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+            - "--leader-election"
+          env:
+            - name: ADDRESS
+              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources:
+{{ toYaml .Values.provisioner.resizer.resources | indent 12 }}
+{{- end }}
 {{- end }}
         - name: csi-cephfsplugin
           image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
diff --git a/charts/ceph-csi-cephfs/templates/provisioner-rules-clusterrole.yaml b/charts/ceph-csi-cephfs/templates/provisioner-rules-clusterrole.yaml
index cde4edb2b..b19c8613a 100644
--- a/charts/ceph-csi-cephfs/templates/provisioner-rules-clusterrole.yaml
+++ b/charts/ceph-csi-cephfs/templates/provisioner-rules-clusterrole.yaml
@@ -19,7 +19,7 @@ rules:
     verbs: ["get", "list"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "create", "delete"]
+    verbs: ["get", "list", "watch", "create", "delete", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
@@ -37,4 +37,11 @@ rules:
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update", "patch"]
 {{- end -}}
+{{- if semverCompare ">=1.15" .Capabilities.KubeVersion.GitVersion -}}
+{{- if .Values.provisioner.resizer.enabled }}
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+{{- end -}}
+{{- end -}}
 {{- end -}}
diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml
index aa298ebda..c2127e0b9 100644
--- a/charts/ceph-csi-cephfs/values.yaml
+++ b/charts/ceph-csi-cephfs/values.yaml
@@ -203,7 +203,7 @@ provisioner:
     enabled: true
     image:
       repository: quay.io/k8scsi/csi-resizer
-      tag: canary
+      tag: v0.3.0
       pullPolicy: IfNotPresent
     resources: {}
 
diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml
index aaa4475a1..582745da9 100644
--- a/charts/ceph-csi-rbd/values.yaml
+++ b/charts/ceph-csi-rbd/values.yaml
@@ -204,16 +204,6 @@ provisioner:
       tag: v1.2.1
       pullPolicy: IfNotPresent
     resources: {}
-
-  resizer:
-    name: resizer
-    enabled: true
-    image:
-      repository: quay.io/k8scsi/csi-resizer
-      tag: canary
-      pullPolicy: IfNotPresent
-    resources: {}
-
   nodeSelector: {}
 
 
@@ -221,8 +211,6 @@ provisioner:
 
   affinity: {}
 
-
-
 #########################################################
 # Variables for 'internal' use please use with caution! #
 #########################################################
diff --git a/deploy/cephfs/kubernetes/v1.13/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/v1.13/csi-provisioner-rbac.yaml
index dc67c1b39..5fcc16477 100644
--- a/deploy/cephfs/kubernetes/v1.13/csi-provisioner-rbac.yaml
+++ b/deploy/cephfs/kubernetes/v1.13/csi-provisioner-rbac.yaml
@@ -46,7 +46,6 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
-
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin-provisioner.yaml
index 9b060e059..0dcf9d7c7 100644
--- a/deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin-provisioner.yaml
+++ b/deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin-provisioner.yaml
@@ -40,7 +40,7 @@ spec:
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
-            - "--timeout=150s"
+            - "--timeout=60s"
            - "--enable-leader-election=true"
             - "--leader-election-type=leases"
             - "--retry-interval-start=500ms"
@@ -51,6 +51,19 @@ spec:
           volumeMounts:
             - name: socket-dir
               mountPath: /csi
+        - name: csi-resizer
+          image: quay.io/k8scsi/csi-resizer:v0.3.0
+          args:
+            - "--csi-address=$(ADDRESS)"
+            - "--v=5"
+            - "--leader-election"
+          env:
+            - name: ADDRESS
+              value: unix:///csi/csi-provisioner.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
         - name: csi-cephfsplugin-attacher
           image: quay.io/k8scsi/csi-attacher:v1.2.0
           args:
diff --git a/deploy/cephfs/kubernetes/v1.14+/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/v1.14+/csi-provisioner-rbac.yaml
index 4cbd8cad2..a41c6ad52 100644
--- a/deploy/cephfs/kubernetes/v1.14+/csi-provisioner-rbac.yaml
+++ b/deploy/cephfs/kubernetes/v1.14+/csi-provisioner-rbac.yaml
@@ -33,7 +33,7 @@ rules:
     verbs: ["list", "watch", "create", "update", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "create", "delete"]
+    verbs: ["get", "list", "watch", "create", "delete", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
@@ -46,6 +46,9 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
 
 ---
 kind: ClusterRoleBinding
diff --git a/e2e/cephfs.go b/e2e/cephfs.go
index 9cf788121..d87cf997b 100644
--- a/e2e/cephfs.go
+++ b/e2e/cephfs.go
@@ -157,6 +157,23 @@ var _ = Describe("cephfs", func() {
 					Fail(err.Error())
 				}
 			})
+			By("Resize PVC and check application directory size", func() {
+				v, err := f.ClientSet.Discovery().ServerVersion()
+				if err != nil {
+					e2elog.Logf("failed to get server version with error %v", err)
+					Fail(err.Error())
+				}
+
+				// Resize is only supported from Kubernetes v1.15+ (csi-resizer v0.3.0)
+				if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
+					err := resizePVCAndValidateSize(pvcPath, appPath, f)
+					if err != nil {
+						e2elog.Logf("failed to resize PVC %v", err)
+						Fail(err.Error())
+					}
+				}
+
+			})
 		})
 	})
 })
diff --git a/e2e/utils.go b/e2e/utils.go
index 045f3eb07..6b4fc8c07 100644
--- a/e2e/utils.go
+++ b/e2e/utils.go
@@ -18,6 +18,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	scv1 "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
@@ -797,8 +798,8 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 	if pvc == nil {
 		return err
 	}
+
 	pvc.Namespace = f.UniqueName
-	e2elog.Logf("The PVC template %+v", pvc)
 
 	app, err := loadApp(appPath)
 	if err != nil {
@@ -839,3 +840,110 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 	err = deletePVCAndApp("", f, pvc, app)
 	return err
 }
+
+func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string, t int) error {
+	pvcName := pvc.Name
+	updatedPVC := pvc.DeepCopy()
+	var err error
+
+	updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvcName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("error fetching pvc %q with %v", pvcName, err)
+	}
+	timeout := time.Duration(t) * time.Minute
+
+	updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
+	_, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Update(updatedPVC)
+	Expect(err).Should(BeNil())
+
+	start := time.Now()
+	e2elog.Logf("Waiting up to %v to be in Resized state", pvc)
+	return wait.PollImmediate(poll, timeout, func() (bool, error) {
+		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", updatedPVC.Name, int(time.Since(start).Seconds()))
+		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Get(pvcName, metav1.GetOptions{})
+		if err != nil {
+			e2elog.Logf("Error getting pvc in namespace: '%s': %v", updatedPVC.Namespace, err)
+			if testutils.IsRetryableAPIError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		pvcConditions := updatedPVC.Status.Conditions
+		if len(pvcConditions) > 0 {
+			if pvcConditions[0].Type == v1.PersistentVolumeClaimResizing {
+				return true, nil
+			}
+			e2elog.Logf("pvc state %v", pvcConditions[0].Type)
+		}
+		return false, nil
+	})
+}
+
+func resizePVCAndValidateSize(pvcPath, appPath string, f *framework.Framework) error {
+	pvc, err := loadPVC(pvcPath)
+	if pvc == nil {
+		return err
+	}
+	pvc.Namespace = f.UniqueName
+
+	resizePvc, err := loadPVC(pvcPath)
+	if resizePvc == nil {
+		return err
+	}
+	resizePvc.Namespace = f.UniqueName
+
+	app, err := loadApp(appPath)
+	if err != nil {
+		return err
+	}
+	app.Labels = map[string]string{"app": "resize-pvc"}
+	app.Namespace = f.UniqueName
+
+	err = createPVCAndApp("", f, pvc, app)
+	if err != nil {
+		return err
+	}
+
+	opt := metav1.ListOptions{
+		LabelSelector: "app=resize-pvc",
+	}
+
+	err = checkDirSize(app, f, &opt, "5.0G", deployTimeout)
+	if err != nil {
+		return err
+	}
+	// resize PVC
+	err = expandPVCSize(f.ClientSet, resizePvc, "10Gi", deployTimeout)
+	if err != nil {
+		return err
+	}
+	// wait for application pod to come up after resize
+	err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
+	if err != nil {
+		return err
+	}
+	err = checkDirSize(app, f, &opt, "10G", deployTimeout)
+	return err
+}
+
+func checkDirSize(app *v1.Pod, f *framework.Framework, opt *metav1.ListOptions, size string, t int) error {
+	dirPath := app.Spec.Containers[0].VolumeMounts[0].MountPath
+	timeout := time.Duration(t) * time.Minute
+	start := time.Now()
+
+	return wait.PollImmediate(poll, timeout, func() (bool, error) {
+		e2elog.Logf("checking directory size %s (%d seconds elapsed)", dirPath, int(time.Since(start).Seconds()))
+		output, stdErr := execCommandInPod(f, fmt.Sprintf("df -h|grep %s |awk '{print $2}'", dirPath), app.Namespace, opt)
+
+		if stdErr != "" {
+			e2elog.Logf("failed to execute command in app pod %v", stdErr)
+			return false, nil
+		}
+
+		if !strings.Contains(output, size) {
+			e2elog.Logf("expected directory size %s found %s information", size, output)
+			return false, nil
+		}
+		return true, nil
+	})
+}
diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml
index 30731b317..54da21d5a 100644
--- a/examples/cephfs/storageclass.yaml
+++ b/examples/cephfs/storageclass.yaml
@@ -31,6 +31,8 @@ parameters:
   # The secrets have to contain user and/or Ceph admin credentials.
   csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
   csi.storage.k8s.io/provisioner-secret-namespace: default
+  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
+  csi.storage.k8s.io/controller-expand-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default
 
@@ -40,5 +42,6 @@ parameters:
   # determined by probing for ceph-fuse and mount.ceph
   # mounter: kernel
 reclaimPolicy: Delete
+allowVolumeExpansion: true
 mountOptions:
   - debug
diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go
index 776c322b6..1b7a794e5 100644
--- a/pkg/cephfs/controllerserver.go
+++ b/pkg/cephfs/controllerserver.go
@@ -304,6 +304,13 @@ func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi
 
 	volID := req.GetVolumeId()
 	secret := req.GetSecrets()
+	// lock out parallel delete operations
+	if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
+		klog.Infof(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
+		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
+	}
+	defer cs.VolumeLocks.Release(volID)
+
 	cr, err := util.NewAdminCredentials(secret)
 	if err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
diff --git a/scripts/minikube.sh b/scripts/minikube.sh
index 697e8fb31..588cd18ef 100755
--- a/scripts/minikube.sh
+++ b/scripts/minikube.sh
@@ -63,7 +63,7 @@ CEPHCSI_IMAGE_REPO=${CEPHCSI_IMAGE_REPO:-"quay.io/cephcsi"}
 K8S_IMAGE_REPO=${K8S_IMAGE_REPO:-"quay.io/k8scsi"}
 
 #feature-gates for kube
-K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true"}
+K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true,ExpandInUsePersistentVolumes=true"}
 
 case "${1:-}" in
 up)