mirror of https://github.com/ceph/ceph-csi.git
e2e: add test cases for pv.Spec.MountOptions
Signed-off-by: Rakshith R <rar@redhat.com>

parent c0201e493b
commit 54eeac212e
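
The tests added here exercise pv.Spec.MountOptions: an option written onto a bound PersistentVolume must be honoured by the CSI nodeplugin when the volume is staged and mounted, which the new helper verifies by checking the nodeplugin container logs. A minimal client-go sketch of the underlying pattern (not the committed helper itself; the package, function, and parameter names here are illustrative only):

package example

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// setPVMountOptions patches spec.mountOptions on an existing PV; this is the
// same field the new verifySeLinuxMountOption helper updates before scanning
// the nodeplugin logs for the option.
func setPVMountOptions(client kubernetes.Interface, pvName string, opts []string) error {
    pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("failed to get PV %q: %w", pvName, err)
    }

    // spec.mountOptions is handed to the CSI driver when the volume is
    // staged/mounted and ends up as mount options, for example the SELinux
    // context="system_u:object_r:container_file_t:s0:c0,c1" option used by
    // these tests.
    pv.Spec.MountOptions = opts

    _, err = client.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
    if err != nil {
        return fmt.Errorf("failed to update PV %q: %w", pvName, err)
    }

    return nil
}
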
@@ -318,6 +318,25 @@ var _ = Describe(cephfsType, func() {
                 }
             })
         }
+
+        By("verify mountOptions support", func() {
+            err := createCephfsStorageClass(f.ClientSet, f, true, nil)
+            if err != nil {
+                framework.Failf("failed to create CephFS storageclass: %v", err)
+            }
+
+            err = verifySeLinuxMountOption(f, pvcPath, appPath,
+                cephFSDeamonSetName, cephFSContainerName, cephCSINamespace)
+            if err != nil {
+                framework.Failf("failed to verify mount options: %v", err)
+            }
+
+            err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+            if err != nil {
+                framework.Failf("failed to delete CephFS storageclass: %v", err)
+            }
+        })
+
         By("verify generic ephemeral volume support", func() {
             err := createCephfsStorageClass(f.ClientSet, f, true, nil)
             if err != nil {
@@ -66,6 +66,12 @@ func createCephfsStorageClass(
     if err != nil {
         return err
     }
+    // TODO: remove this once the ceph-csi driver release-v3.9 is completed
+    // and upgrade tests are done from v3.9 to devel.
+    // The mountOptions from previous are not compatible with NodeStageVolume
+    // request.
+    sc.MountOptions = []string{}
+
     sc.Parameters["fsName"] = fileSystemName
     sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace
     sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephFSProvisionerSecretName
e2e/nfs.go (19 changed lines)

@@ -43,6 +43,7 @@ var (
     nfsRookCephNFS    = "rook-nfs.yaml"
     nfsDeploymentName = "csi-nfsplugin-provisioner"
     nfsDeamonSetName  = "csi-nfsplugin"
+    nfsContainerName  = "csi-nfsplugin"
     nfsDirPath        = "../deploy/nfs/kubernetes/"
     nfsExamplePath    = examplePath + "nfs/"
     nfsPoolName       = ".nfs"
@@ -363,6 +364,24 @@ var _ = Describe("nfs", func() {
             }
         })
 
+        By("verify mountOptions support", func() {
+            err := createNFSStorageClass(f.ClientSet, f, false, nil)
+            if err != nil {
+                framework.Failf("failed to create NFS storageclass: %v", err)
+            }
+
+            err = verifySeLinuxMountOption(f, pvcPath, appPath,
+                nfsDeamonSetName, nfsContainerName, cephCSINamespace)
+            if err != nil {
+                framework.Failf("failed to verify mount options: %v", err)
+            }
+
+            err = deleteResource(nfsExamplePath + "storageclass.yaml")
+            if err != nil {
+                framework.Failf("failed to delete NFS storageclass: %v", err)
+            }
+        })
+
         By("verify RWOP volume support", func() {
             err := createNFSStorageClass(f.ClientSet, f, false, nil)
             if err != nil {
e2e/pod.go (107 changed lines)
@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/client/conditions"
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    frameworkPod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
@@ -164,6 +165,28 @@ func execCommandInDaemonsetPod(
     f *framework.Framework,
     c, daemonsetName, nodeName, containerName, ns string,
 ) (string, error) {
+    podName, err := getDaemonsetPodOnNode(f, daemonsetName, nodeName, ns)
+    if err != nil {
+        return "", err
+    }
+
+    cmd := []string{"/bin/sh", "-c", c}
+    podOpt := e2epod.ExecOptions{
+        Command:       cmd,
+        Namespace:     ns,
+        PodName:       podName,
+        ContainerName: containerName,
+        CaptureStdout: true,
+        CaptureStderr: true,
+    }
+
+    _ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
+
+    return stderr, err
+}
+
+// getDaemonsetPodOnNode returns the name of a daemonset pod on a particular node.
+func getDaemonsetPodOnNode(f *framework.Framework, daemonsetName, nodeName, ns string) (string, error) {
     selector, err := getDaemonSetLabelSelector(f, ns, daemonsetName)
     if err != nil {
         return "", err
@@ -187,19 +210,7 @@ func execCommandInDaemonsetPod(
         return "", fmt.Errorf("%s daemonset pod on node %s in namespace %s not found", daemonsetName, nodeName, ns)
     }
 
-    cmd := []string{"/bin/sh", "-c", c}
-    podOpt := e2epod.ExecOptions{
-        Command:       cmd,
-        Namespace:     ns,
-        PodName:       podName,
-        ContainerName: containerName,
-        CaptureStdout: true,
-        CaptureStderr: true,
-    }
-
-    _ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
-
-    return stderr, err
+    return podName, nil
 }
 
 // listPods returns slice of pods matching given ListOptions and namespace.
@@ -542,3 +553,73 @@ func validateRWOPPodCreation(
 
     return nil
 }
+
+// verifySeLinuxMountOption verifies the SeLinux context MountOption added to PV.Spec.MountOption
+// is successfully used by nodeplugin during mounting by checking for its presence in the
+// nodeplugin container logs.
+func verifySeLinuxMountOption(
+    f *framework.Framework,
+    pvcPath, appPath, daemonSetName, cn, ns string,
+) error {
+    mountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
+
+    // create PVC
+    pvc, err := loadPVC(pvcPath)
+    if err != nil {
+        return fmt.Errorf("failed to load pvc: %w", err)
+    }
+    pvc.Namespace = f.UniqueName
+    err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+    if err != nil {
+        return fmt.Errorf("failed to create PVC: %w", err)
+    }
+    // modify PV spec.MountOptions
+    pv, err := getBoundPV(f.ClientSet, pvc)
+    if err != nil {
+        return fmt.Errorf("failed to get PV: %w", err)
+    }
+    pv.Spec.MountOptions = []string{mountOption}
+
+    // update PV
+    _, err = f.ClientSet.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
+    if err != nil {
+        return fmt.Errorf("failed to update pv: %w", err)
+    }
+
+    app, err := loadApp(appPath)
+    if err != nil {
+        return fmt.Errorf("failed to load application: %w", err)
+    }
+    app.Namespace = f.UniqueName
+    err = createApp(f.ClientSet, app, deployTimeout)
+    if err != nil {
+        return fmt.Errorf("failed to create application: %w", err)
+    }
+
+    pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), app.Name, metav1.GetOptions{})
+    if err != nil {
+        framework.Logf("Error occurred getting pod %s in namespace %s", app.Name, f.UniqueName)
+
+        return fmt.Errorf("failed to get pod: %w", err)
+    }
+
+    nodepluginPodName, err := getDaemonsetPodOnNode(f, daemonSetName, pod.Spec.NodeName, ns)
+    if err != nil {
+        return fmt.Errorf("failed to get daemonset pod on node: %w", err)
+    }
+    logs, err := frameworkPod.GetPodLogs(context.TODO(), f.ClientSet, ns, nodepluginPodName, cn)
+    if err != nil {
+        return fmt.Errorf("failed to get pod logs from container %s/%s/%s : %w", ns, nodepluginPodName, cn, err)
+    }
+
+    if !strings.Contains(logs, mountOption) {
+        return fmt.Errorf("mount option %s not found in logs: %s", mountOption, logs)
+    }
+
+    err = deletePVCAndApp("", f, pvc, app)
+    if err != nil {
+        return fmt.Errorf("failed to delete PVC and application: %w", err)
+    }
+
+    return nil
+}
@@ -51,6 +51,7 @@ var (
     e2eTemplatesPath  = "../e2e/templates/"
     rbdDeploymentName = "csi-rbdplugin-provisioner"
     rbdDaemonsetName  = "csi-rbdplugin"
+    rbdContainerName  = "csi-rbdplugin"
     defaultRBDPool    = "replicapool"
     erasureCodedPool  = "ec-pool"
     noDataPool        = ""
@@ -443,6 +444,14 @@ var _ = Describe("RBD", func() {
             })
         }
 
+        By("verify mountOptions support", func() {
+            err := verifySeLinuxMountOption(f, pvcPath, appPath,
+                rbdDaemonsetName, rbdContainerName, cephCSINamespace)
+            if err != nil {
+                framework.Failf("failed to verify mount options: %v", err)
+            }
+        })
+
         By("create a PVC and check PVC/PV metadata on RBD image", func() {
             pvc, err := loadPVC(pvcPath)
             if err != nil {
@@ -65,5 +65,5 @@ parameters:
 
 reclaimPolicy: Delete
 allowVolumeExpansion: true
-mountOptions:
-  - debug
+# mountOptions:
+#   - context="system_u:object_r:container_file_t:s0:c0,c1"
@@ -15,5 +15,3 @@ parameters:
   csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
 reclaimPolicy: Delete
 allowVolumeExpansion: true
-mountOptions:
-  - debug