diff --git a/e2e/cephfs.go b/e2e/cephfs.go index 1a2388301..8e558170b 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -318,6 +318,25 @@ var _ = Describe(cephfsType, func() { } }) } + + By("verify mountOptions support", func() { + err := createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + + err = verifySeLinuxMountOption(f, pvcPath, appPath, + cephFSDeamonSetName, cephFSContainerName, cephCSINamespace) + if err != nil { + framework.Failf("failed to verify mount options: %v", err) + } + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + }) + By("verify generic ephemeral volume support", func() { err := createCephfsStorageClass(f.ClientSet, f, true, nil) if err != nil { diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index b2b2e5596..81df42ca8 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -66,6 +66,12 @@ func createCephfsStorageClass( if err != nil { return err } + // TODO: remove this once the ceph-csi driver release-v3.9 is completed + // and upgrade tests are done from v3.9 to devel. + // The mountOptions from previous are not compatible with NodeStageVolume + // request. 
+	sc.MountOptions = []string{}
+
 	sc.Parameters["fsName"] = fileSystemName
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephFSProvisionerSecretName
diff --git a/e2e/nfs.go b/e2e/nfs.go
index d06e06e4e..054558e49 100644
--- a/e2e/nfs.go
+++ b/e2e/nfs.go
@@ -43,6 +43,7 @@ var (
 	nfsRookCephNFS    = "rook-nfs.yaml"
 	nfsDeploymentName = "csi-nfsplugin-provisioner"
 	nfsDeamonSetName  = "csi-nfsplugin"
+	nfsContainerName  = "csi-nfsplugin"
 	nfsDirPath        = "../deploy/nfs/kubernetes/"
 	nfsExamplePath    = examplePath + "nfs/"
 	nfsPoolName       = ".nfs"
@@ -363,6 +364,24 @@ var _ = Describe("nfs", func() {
 				}
 			})
 
+			By("verify mountOptions support", func() {
+				err := createNFSStorageClass(f.ClientSet, f, false, nil)
+				if err != nil {
+					framework.Failf("failed to create NFS storageclass: %v", err)
+				}
+
+				err = verifySeLinuxMountOption(f, pvcPath, appPath,
+					nfsDeamonSetName, nfsContainerName, cephCSINamespace)
+				if err != nil {
+					framework.Failf("failed to verify mount options: %v", err)
+				}
+
+				err = deleteResource(nfsExamplePath + "storageclass.yaml")
+				if err != nil {
+					framework.Failf("failed to delete NFS storageclass: %v", err)
+				}
+			})
+
 			By("verify RWOP volume support", func() {
 				err := createNFSStorageClass(f.ClientSet, f, false, nil)
 				if err != nil {
diff --git a/e2e/pod.go b/e2e/pod.go
index e25430268..427189b58 100644
--- a/e2e/pod.go
+++ b/e2e/pod.go
@@ -164,6 +164,28 @@ func execCommandInDaemonsetPod(
 	f *framework.Framework, c, daemonsetName, nodeName, containerName, ns string,
 ) (string, error) {
+	podName, err := getDaemonsetPodOnNode(f, daemonsetName, nodeName, ns)
+	if err != nil {
+		return "", err
+	}
+
+	cmd := []string{"/bin/sh", "-c", c}
+	podOpt := e2epod.ExecOptions{
+		Command:       cmd,
+		Namespace:     ns,
+		PodName:       podName,
+		ContainerName: containerName,
+		CaptureStdout: true,
+		CaptureStderr: true,
+	}
+
+	_ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
+
+	return stderr, err
+}
+
+// getDaemonsetPodOnNode returns the name of a daemonset pod on a particular node.
+func getDaemonsetPodOnNode(f *framework.Framework, daemonsetName, nodeName, ns string) (string, error) {
 	selector, err := getDaemonSetLabelSelector(f, ns, daemonsetName)
 	if err != nil {
 		return "", err
 	}
@@ -187,19 +209,7 @@ func execCommandInDaemonsetPod(
 		return "", fmt.Errorf("%s daemonset pod on node %s in namespace %s not found", daemonsetName, nodeName, ns)
 	}
 
-	cmd := []string{"/bin/sh", "-c", c}
-	podOpt := e2epod.ExecOptions{
-		Command:       cmd,
-		Namespace:     ns,
-		PodName:       podName,
-		ContainerName: containerName,
-		CaptureStdout: true,
-		CaptureStderr: true,
-	}
-
-	_ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
-
-	return stderr, err
+	return podName, nil
 }
 
 // listPods returns slice of pods matching given ListOptions and namespace.
@@ -542,3 +552,73 @@ func validateRWOPPodCreation(
 
 	return nil
 }
+
+// verifySeLinuxMountOption verifies the SeLinux context MountOption added to PV.Spec.MountOption
+// is successfully used by nodeplugin during mounting by checking for its presence in the
+// nodeplugin container logs.
+func verifySeLinuxMountOption(
+	f *framework.Framework,
+	pvcPath, appPath, daemonSetName, cn, ns string,
+) error {
+	mountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
+
+	// create PVC
+	pvc, err := loadPVC(pvcPath)
+	if err != nil {
+		return fmt.Errorf("failed to load pvc: %w", err)
+	}
+	pvc.Namespace = f.UniqueName
+	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+	if err != nil {
+		return fmt.Errorf("failed to create PVC: %w", err)
+	}
+	// modify PV spec.MountOptions
+	pv, err := getBoundPV(f.ClientSet, pvc)
+	if err != nil {
+		return fmt.Errorf("failed to get PV: %w", err)
+	}
+	pv.Spec.MountOptions = []string{mountOption}
+
+	// update PV
+	_, err = f.ClientSet.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to update pv: %w", err)
+	}
+
+	app, err := loadApp(appPath)
+	if err != nil {
+		return fmt.Errorf("failed to load application: %w", err)
+	}
+	app.Namespace = f.UniqueName
+	err = createApp(f.ClientSet, app, deployTimeout)
+	if err != nil {
+		return fmt.Errorf("failed to create application: %w", err)
+	}
+
+	pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), app.Name, metav1.GetOptions{})
+	if err != nil {
+		framework.Logf("Error occurred getting pod %s in namespace %s", app.Name, f.UniqueName)
+
+		return fmt.Errorf("failed to get pod: %w", err)
+	}
+
+	nodepluginPodName, err := getDaemonsetPodOnNode(f, daemonSetName, pod.Spec.NodeName, ns)
+	if err != nil {
+		return fmt.Errorf("failed to get daemonset pod on node: %w", err)
+	}
+	logs, err := e2epod.GetPodLogs(context.TODO(), f.ClientSet, ns, nodepluginPodName, cn)
+	if err != nil {
+		return fmt.Errorf("failed to get pod logs from container %s/%s/%s: %w", ns, nodepluginPodName, cn, err)
+	}
+
+	if !strings.Contains(logs, mountOption) {
+		return fmt.Errorf("mount option %s not found in logs: %s", mountOption, logs)
+	}
+
+	err = deletePVCAndApp("", f, pvc,
app) + if err != nil { + return fmt.Errorf("failed to delete PVC and application: %w", err) + } + + return nil +} diff --git a/e2e/rbd.go b/e2e/rbd.go index 6bac023da..57d0c1b79 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -51,6 +51,7 @@ var ( e2eTemplatesPath = "../e2e/templates/" rbdDeploymentName = "csi-rbdplugin-provisioner" rbdDaemonsetName = "csi-rbdplugin" + rbdContainerName = "csi-rbdplugin" defaultRBDPool = "replicapool" erasureCodedPool = "ec-pool" noDataPool = "" @@ -443,6 +444,14 @@ var _ = Describe("RBD", func() { }) } + By("verify mountOptions support", func() { + err := verifySeLinuxMountOption(f, pvcPath, appPath, + rbdDaemonsetName, rbdContainerName, cephCSINamespace) + if err != nil { + framework.Failf("failed to verify mount options: %v", err) + } + }) + By("create a PVC and check PVC/PV metadata on RBD image", func() { pvc, err := loadPVC(pvcPath) if err != nil { diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml index 4c16bc2e7..fd3aa1649 100644 --- a/examples/cephfs/storageclass.yaml +++ b/examples/cephfs/storageclass.yaml @@ -65,5 +65,5 @@ parameters: reclaimPolicy: Delete allowVolumeExpansion: true -mountOptions: - - debug +# mountOptions: +# - context="system_u:object_r:container_file_t:s0:c0,c1" diff --git a/scripts/k8s-storage/sc-cephfs.yaml.in b/scripts/k8s-storage/sc-cephfs.yaml.in index 020daf9ed..2f57a78c9 100644 --- a/scripts/k8s-storage/sc-cephfs.yaml.in +++ b/scripts/k8s-storage/sc-cephfs.yaml.in @@ -15,5 +15,3 @@ parameters: csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph reclaimPolicy: Delete allowVolumeExpansion: true -mountOptions: - - debug