Mirror of https://github.com/ceph/ceph-csi.git
Add e2e tests for topology based provisioning
- This commit adds tests only for RBD, as CephFS still needs an enhancement in
  CephFS subvolume commands to effectively use topology based provisioning.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
parent: d7ea523540    commit: e73921f268
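For orientation before reading the diff: the new RBD test labels every node with test region/zone labels, deploys the nodeplugin with --domainlabels set, creates a StorageClass that combines volumeBindingMode: WaitForFirstConsumer with a topologyConstrainedPools parameter, and then verifies that the image, its journal, and the PV node affinity all point at the topology-constrained pool. The snippet below is not part of the commit; it is a small sketch that rebuilds the same topologyConstrainedPools JSON with encoding/json instead of the string concatenation used in the test, so the shape of the parameter is easier to see. The struct and field names in the sketch are illustrative only; the values mirror the test's constants.

// Sketch only: reproduces the topologyConstrainedPools value the e2e test
// passes to createRBDStorageClass. Not part of the commit.
package main

import (
    "encoding/json"
    "fmt"
)

type domainSegment struct {
    DomainLabel string `json:"domainLabel"`
    Value       string `json:"value"`
}

type topologyConstrainedPool struct {
    PoolName       string          `json:"poolName"`
    DomainSegments []domainSegment `json:"domainSegments"`
}

func main() {
    // Values taken from the test: rbdTopologyPool, regionValue, zoneValue.
    pools := []topologyConstrainedPool{{
        PoolName: "newrbdpool",
        DomainSegments: []domainSegment{
            {DomainLabel: "region", Value: "testregion"},
            {DomainLabel: "zone", Value: "testzone"},
        },
    }}

    out, err := json.Marshal(pools)
    if err != nil {
        panic(err)
    }
    // Prints the same string the test assembles by hand:
    // [{"poolName":"newrbdpool","domainSegments":[{"domainLabel":"region","value":"testregion"},{"domainLabel":"zone","value":"testzone"}]}]
    fmt.Println(string(out))
}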
@@ -31,12 +31,14 @@ cluster.
 the following parameters are available to configure kubernetes cluster

 | flag | description |
-| ----------- | ------------------------------------------------------------- |
+| ----------------- | ------------------------------------------------------------- |
 | up | Starts a local kubernetes cluster and prepare a disk for rook |
 | down | Stops a running local kubernetes cluster |
 | clean | Deletes a local kubernetes cluster |
 | ssh | Log into or run a command on a minikube machine with SSH |
 | deploy-rook | Deploy rook to minikube |
+| create-block-pool | Creates a rook block pool (named $ROOK_BLOCK_POOL_NAME) |
+| delete-block-pool | Deletes a rook block pool (named $ROOK_BLOCK_POOL_NAME) |
 | clean-rook | Deletes a rook from minikube |
 | cephcsi | Copy built docker images to kubernetes cluster |
 | k8s-sidecar | Copy kubernetes sidecar docker images to kubernetes cluster |
@@ -52,6 +54,7 @@ following environment variables can be exported to customize kubernetes deployme
 | CEPHCSI_IMAGE_REPO | Repo URL to pull cephcsi images | quay.io/cephcsi |
 | K8S_IMAGE_REPO | Repo URL to pull kubernetes sidecar images | quay.io/k8scsi |
 | K8S_FEATURE_GATES | Feature gates to enable on kubernetes cluster | BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true |
+| ROOK_BLOCK_POOL_NAME | Block pool name to create in the rook instance | newrbdpool |

 - creating kubernetes cluster
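Note: the default newrbdpool matches the hard-coded rbdTopologyPool constant in the new e2e/rbd.go test further down, so the topology test appears to depend on ROOK_BLOCK_POOL_NAME being left at its default (or on keeping the two values in sync).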
@@ -201,7 +201,7 @@ var _ = Describe("cephfs", func() {
 // create pvc and app
 for i := 0; i < totalCount; i++ {
     name := fmt.Sprintf("%s%d", f.UniqueName, i)
-    err := createPVCAndApp(name, f, pvc, app)
+    err := createPVCAndApp(name, f, pvc, app, deployTimeout)
     if err != nil {
         Fail(err.Error())
     }

e2e/rbd.go
@@ -22,6 +22,14 @@ var (
     rbdExamplePath = "../examples/rbd/"
     rbdDeploymentName = "csi-rbdplugin-provisioner"
     rbdDaemonsetName = "csi-rbdplugin"
+    // Topology related variables
+    nodeRegionLabel = "test.failure-domain/region"
+    regionValue = "testregion"
+    nodeZoneLabel = "test.failure-domain/zone"
+    zoneValue = "testzone"
+    nodeCSIRegionLabel = "topology.rbd.csi.ceph.com/region"
+    nodeCSIZoneLabel = "topology.rbd.csi.ceph.com/zone"
+    rbdTopologyPool = "newrbdpool"
 )

 func deployRBDPlugin() {
@@ -83,9 +91,13 @@ func createORDeleteRbdResouces(action string) {
 if err != nil {
     e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePlugin, err)
 }
+
+domainLabel := nodeRegionLabel + "," + nodeZoneLabel
+data = addTopologyDomainsToDSYaml(data, domainLabel)
 _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
 if err != nil {
     e2elog.Logf("failed to %s nodeplugin %v", action, err)
+    Fail(err.Error())
 }

 data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC)
@@ -113,6 +125,8 @@ var _ = Describe("RBD", func() {
 // deploy RBD CSI
 BeforeEach(func() {
     c = f.ClientSet
+    createNodeLabel(f, nodeRegionLabel, regionValue)
+    createNodeLabel(f, nodeZoneLabel, zoneValue)
     if deployRBD {
         if cephCSINamespace != defaultNs {
             err := createNamespace(c, cephCSINamespace)
@@ -123,7 +137,7 @@ var _ = Describe("RBD", func() {
         deployRBDPlugin()
     }
     createConfigMap(rbdDirPath, f.ClientSet, f)
-    createRBDStorageClass(f.ClientSet, f, make(map[string]string))
+    createRBDStorageClass(f.ClientSet, f, nil, nil)
     createRBDSecret(f.ClientSet, f)
     deployVault(f.ClientSet, deployTimeout)
 })
@@ -152,6 +166,11 @@ var _ = Describe("RBD", func() {
             }
         }
     }
+    deleteNodeLabel(c, nodeRegionLabel)
+    deleteNodeLabel(c, nodeZoneLabel)
+    // Remove the CSI labels that get added
+    deleteNodeLabel(c, nodeCSIRegionLabel)
+    deleteNodeLabel(c, nodeCSIZoneLabel)
 })

 Context("Test RBD CSI", func() {
@@ -164,18 +183,19 @@ var _ = Describe("RBD", func() {
 // appClonePath := rbdExamplePath + "pod-restore.yaml"
 // snapshotPath := rbdExamplePath + "snapshot.yaml"

-By("checking provisioner deployment is running")
-var err error
-err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+By("checking provisioner deployment is running", func() {
+    err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 if err != nil {
     Fail(err.Error())
 }
+})

-By("checking nodeplugin deamonsets is running")
-err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
+By("checking nodeplugin deamonsets is running", func() {
+    err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
 if err != nil {
     Fail(err.Error())
 }
+})

 By("create a PVC and Bind it to an app", func() {
     validatePVCAndAppBinding(pvcPath, appPath, f)
@@ -187,18 +207,18 @@ var _ = Describe("RBD", func() {

 By("create a PVC and Bind it to an app with ext4 as the FS ", func() {
     deleteResource(rbdExamplePath + "storageclass.yaml")
-    createRBDStorageClass(f.ClientSet, f, map[string]string{"csi.storage.k8s.io/fstype": "ext4"})
+    createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "ext4"})
     validatePVCAndAppBinding(pvcPath, appPath, f)
     deleteResource(rbdExamplePath + "storageclass.yaml")
-    createRBDStorageClass(f.ClientSet, f, make(map[string]string))
+    createRBDStorageClass(f.ClientSet, f, nil, nil)
 })

 By("create a PVC and Bind it to an app with encrypted RBD volume", func() {
     deleteResource(rbdExamplePath + "storageclass.yaml")
-    createRBDStorageClass(f.ClientSet, f, map[string]string{"encrypted": "true"})
+    createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"encrypted": "true"})
     validateEncryptedPVCAndAppBinding(pvcPath, appPath, "", f)
     deleteResource(rbdExamplePath + "storageclass.yaml")
-    createRBDStorageClass(f.ClientSet, f, make(map[string]string))
+    createRBDStorageClass(f.ClientSet, f, nil, nil)
 })

 By("create a PVC and Bind it to an app with encrypted RBD volume with Vault KMS", func() {
@@ -207,10 +227,10 @@ var _ = Describe("RBD", func() {
     "encrypted": "true",
     "encryptionKMSID": "vault-test",
 }
-createRBDStorageClass(f.ClientSet, f, scOpts)
+createRBDStorageClass(f.ClientSet, f, nil, scOpts)
 validateEncryptedPVCAndAppBinding(pvcPath, appPath, "vault", f)
 deleteResource(rbdExamplePath + "storageclass.yaml")
-createRBDStorageClass(f.ClientSet, f, make(map[string]string))
+createRBDStorageClass(f.ClientSet, f, nil, nil)
 })

 // skipping snapshot testing
@@ -284,7 +304,7 @@ var _ = Describe("RBD", func() {
 // create pvc and app
 for i := 0; i < totalCount; i++ {
     name := fmt.Sprintf("%s%d", f.UniqueName, i)
-    err := createPVCAndApp(name, f, pvc, app)
+    err := createPVCAndApp(name, f, pvc, app, deployTimeout)
     if err != nil {
         Fail(err.Error())
     }
@@ -293,7 +313,7 @@ var _ = Describe("RBD", func() {
 // validate created backend rbd images
 images := listRBDImages(f)
 if len(images) != totalCount {
-    e2elog.Logf("backend image creation not matching pvc count, image count = %d pvc count %d images found = %+v", len(images), totalCount, images)
+    e2elog.Logf("backend image creation not matching pvc count, image count = % pvc count %d", len(images), totalCount)
     Fail("validate multiple pvc failed")
 }
@@ -338,7 +358,7 @@ var _ = Describe("RBD", func() {
 }

 deleteResource(rbdExamplePath + "storageclass.yaml")
-createRBDStorageClass(f.ClientSet, f, map[string]string{"csi.storage.k8s.io/fstype": "xfs"})
+createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "xfs"})
 err = resizePVCAndValidateSize(pvcPath, appPath, f)
 if err != nil {
     e2elog.Logf("failed to resize filesystem PVC %v", err)
@@ -377,7 +397,7 @@ var _ = Describe("RBD", func() {
     Fail(err.Error())
 }
 app.Namespace = f.UniqueName
-err = createPVCAndApp("", f, pvc, app)
+err = createPVCAndApp("", f, pvc, app, deployTimeout)
 if err != nil {
     Fail(err.Error())
 }
@@ -402,7 +422,7 @@ var _ = Describe("RBD", func() {
 By("create PVC in storageClass with volumeNamePrefix", func() {
     volumeNamePrefix := "foo-bar-"
     deleteResource(rbdExamplePath + "storageclass.yaml")
-    createRBDStorageClass(f.ClientSet, f, map[string]string{"volumeNamePrefix": volumeNamePrefix})
+    createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"volumeNamePrefix": volumeNamePrefix})

     // set up PVC
     pvc, err := loadPVC(pvcPath)
@@ -431,7 +451,7 @@ var _ = Describe("RBD", func() {
     Fail(err.Error())
 }
 deleteResource(rbdExamplePath + "storageclass.yaml")
-createRBDStorageClass(f.ClientSet, f, make(map[string]string))
+createRBDStorageClass(f.ClientSet, f, nil, nil)

 if !foundIt {
     Fail(fmt.Sprintf("could not find image with prefix %s", volumeNamePrefix))
@@ -460,6 +480,54 @@ var _ = Describe("RBD", func() {
     }
 })

+By("creating an app with a PVC, using a topology constrained StorageClass", func() {
+    By("checking node has required CSI topology labels set", func() {
+        checkNodeHasLabel(f.ClientSet, nodeCSIRegionLabel, regionValue)
+        checkNodeHasLabel(f.ClientSet, nodeCSIZoneLabel, zoneValue)
+    })
+
+    By("creating a StorageClass with delayed binding mode and CSI topology parameter")
+    deleteResource(rbdExamplePath + "storageclass.yaml")
+    topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"domainSegments\":" +
+        "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," +
+        "{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]"
+    createRBDStorageClass(f.ClientSet, f,
+        map[string]string{"volumeBindingMode": "WaitForFirstConsumer"},
+        map[string]string{"topologyConstrainedPools": topologyConstraint})
+
+    By("creating an app using a PV from the delayed binding mode StorageClass")
+    pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, 0)
+
+    By("ensuring created PV has required node selector values populated")
+    checkPVSelectorValuesForPVC(f, pvc)
+
+    By("ensuring created PV has its image in the topology specific pool")
+    err := checkPVCImageInPool(f, pvc, rbdTopologyPool)
+    if err != nil {
+        Fail(err.Error())
+    }
+
+    By("ensuring created PV has its image journal in the topology specific pool")
+    err = checkPVCImageJournalInPool(f, pvc, rbdTopologyPool)
+    if err != nil {
+        Fail(err.Error())
+    }
+
+    By("ensuring created PV has its CSI journal in the CSI journal specific pool")
+    err = checkPVCCSIJournalInPool(f, pvc, "replicapool")
+    if err != nil {
+        Fail(err.Error())
+    }
+
+    // cleanup and undo changes made by the test
+    err = deletePVCAndApp("", f, pvc, app)
+    if err != nil {
+        Fail(err.Error())
+    }
+    deleteResource(rbdExamplePath + "storageclass.yaml")
+    createRBDStorageClass(f.ClientSet, f, nil, nil)
+})
+
 // Make sure this should be last testcase in this file, because
 // it deletes pool
 By("Create a PVC and Delete PVC when backend pool deleted", func() {
@@ -84,7 +84,7 @@ func resizePVCAndValidateSize(pvcPath, appPath string, f *framework.Framework) e
 app.Labels = map[string]string{"app": "resize-pvc"}
 app.Namespace = f.UniqueName

-err = createPVCAndApp("", f, pvc, app)
+err = createPVCAndApp("", f, pvc, app, deployTimeout)
 if err != nil {
     return err
 }

e2e/utils.go
@@ -294,7 +294,7 @@ func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, en
     Expect(err).Should(BeNil())
 }

-func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, parameters map[string]string) {
+func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOptions, parameters map[string]string) {
     scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml")
     sc := getStorageClass(scPath)
     sc.Parameters["pool"] = "replicapool"
@@ -320,6 +320,11 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, param
     sc.Parameters[k] = v
 }
 sc.Namespace = cephCSINamespace
+
+if scOptions["volumeBindingMode"] == "WaitForFirstConsumer" {
+    value := scv1.VolumeBindingWaitForFirstConsumer
+    sc.VolumeBindingMode = &value
+}
 _, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
 Expect(err).Should(BeNil())
 }
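In the updated helper, the two maps play different roles: scOptions configures the StorageClass object itself (at this point only "volumeBindingMode" is inspected, and only the value "WaitForFirstConsumer" is mapped onto sc.VolumeBindingMode; scv1 is presumably the k8s.io/api/storage/v1 import alias), while parameters continues to be copied into sc.Parameters. Callers that need neither simply pass nil, nil, as the updated call sites in e2e/rbd.go show.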
@@ -506,6 +511,9 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
 var err error
 _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
 Expect(err).Should(BeNil())
+if timeout == 0 {
+    return nil
+}
 name := pvc.Name
 start := time.Now()
 e2elog.Logf("Waiting up to %v to be in Bound state", pvc)
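The timeout == 0 early return lets a caller create a PVC without waiting for it to become Bound. The new topology test relies on this by calling createPVCAndAppBinding(pvcPath, appPath, f, 0): with a WaitForFirstConsumer StorageClass the claim stays Pending until the consuming pod is scheduled, so waiting for Bound before creating the app would never succeed.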
@@ -664,13 +672,13 @@ func unmarshal(fileName string, obj interface{}) error {

 // createPVCAndApp creates pvc and pod
 // if name is not empty same will be set as pvc and app name
-func createPVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error {
+func createPVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod, pvcTimeout int) error {
     if name != "" {
         pvc.Name = name
         app.Name = name
         app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
     }
-    err := createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+    err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
     if err != nil {
         return err
     }
@@ -695,7 +703,7 @@ func deletePVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolu
     return err
 }

-func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) (*v1.PersistentVolumeClaim, *v1.Pod) {
+func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework, pvcTimeout int) (*v1.PersistentVolumeClaim, *v1.Pod) {
     pvc, err := loadPVC(pvcPath)
     if pvc == nil {
         Fail(err.Error())
@@ -709,7 +717,7 @@ func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) (*v
 }
 app.Namespace = f.UniqueName

-err = createPVCAndApp("", f, pvc, app)
+err = createPVCAndApp("", f, pvc, app, pvcTimeout)
 if err != nil {
     Fail(err.Error())
 }
@@ -718,29 +726,46 @@ func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) (*v
 }

 func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) {
-    pvc, app := createPVCAndAppBinding(pvcPath, appPath, f)
+    pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
     err := deletePVCAndApp("", f, pvc, app)
     if err != nil {
         Fail(err.Error())
     }
 }

-func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (string, string, error) {
+type imageInfoFromPVC struct {
+    imageID string
+    imageName string
+    csiVolumeHandle string
+    pvName string
+}
+
+// getImageInfoFromPVC reads volume handle of the bound PV to the passed in PVC,
+// and returns imageInfoFromPVC or error
+func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (imageInfoFromPVC, error) {
+    var imageData imageInfoFromPVC
+
     c := f.ClientSet.CoreV1()
     pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
     if err != nil {
-        return "", "", err
+        return imageData, err
     }

     pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
     if err != nil {
-        return "", "", err
+        return imageData, err
     }

     imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
     imageID := imageIDRegex.FindString(pv.Spec.CSI.VolumeHandle)

-    return fmt.Sprintf("csi-vol-%s", imageID), pv.Spec.CSI.VolumeHandle, nil
+    imageData = imageInfoFromPVC{
+        imageID: imageID,
+        imageName: fmt.Sprintf("csi-vol-%s", imageID),
+        csiVolumeHandle: pv.Spec.CSI.VolumeHandle,
+        pvName: pv.Name,
+    }
+    return imageData, nil
 }

 func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string, error) {
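As a side note on the unchanged extraction logic that getImageInfoFromPVC now wraps in a struct: the regular expression picks the last five dash-separated groups of the CSI volume handle, i.e. the volume's UUID, and imageName is that UUID prefixed with csi-vol-. The standalone snippet below is not part of the commit and uses a made-up handle that merely has the same general shape as the handles ceph-csi generates.

// Sketch only: shows what the imageID regex in getImageInfoFromPVC extracts.
package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Hypothetical CSI volume handle; only its shape matters here.
    handle := "0001-0009-rook-ceph-0000000000000001-89c95471-58e5-11ea-8f53-e6c3d3f8f348"

    // Same expression as in getImageInfoFromPVC: the trailing five groups of
    // word characters plus optional dash, i.e. the UUID at the end of the handle.
    imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
    imageID := imageIDRegex.FindString(handle)

    fmt.Println(imageID)              // 89c95471-58e5-11ea-8f53-e6c3d3f8f348
    fmt.Println("csi-vol-" + imageID) // imageName, as used by the pool checks
}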
@@ -785,13 +810,13 @@ func readVaultSecret(key string, f *framework.Framework) (string, string) {
 }

 func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framework.Framework) {
-    pvc, app := createPVCAndAppBinding(pvcPath, appPath, f)
+    pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)

-    rbdImageID, rbdImageHandle, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
     if err != nil {
         Fail(err.Error())
     }
-    rbdImageSpec := fmt.Sprintf("replicapool/%s", rbdImageID)
+    rbdImageSpec := fmt.Sprintf("replicapool/%s", imageData.imageName)
     encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f)
     if err != nil {
         Fail(err.Error())
@@ -807,7 +832,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor

 if kms == "vault" {
     // check new passphrase created
-    _, stdErr := readVaultSecret(rbdImageHandle, f)
+    _, stdErr := readVaultSecret(imageData.csiVolumeHandle, f)
     if stdErr != "" {
         Fail(fmt.Sprintf("failed to read passphrase from vault: %s", stdErr))
     }
@@ -820,7 +845,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor

 if kms == "vault" {
     // check new passphrase created
-    stdOut, _ := readVaultSecret(rbdImageHandle, f)
+    stdOut, _ := readVaultSecret(imageData.csiVolumeHandle, f)
     if stdOut != "" {
         Fail(fmt.Sprintf("passphrase found in vault while should be deleted: %s", stdOut))
     }
@@ -979,7 +1004,7 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) {
 // }

 func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
-    volname, _, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
     if err != nil {
         return err
     }
@@ -987,11 +1012,11 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
 opt := metav1.ListOptions{
     LabelSelector: "app=rook-ceph-tools",
 }
-_, stdErr := execCommandInPod(f, "ceph fs subvolume rm myfs "+volname+" csi", rookNamespace, &opt)
+_, stdErr := execCommandInPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" csi", rookNamespace, &opt)
 Expect(stdErr).Should(BeEmpty())

 if stdErr != "" {
-    return fmt.Errorf("error deleting backing volume %s", volname)
+    return fmt.Errorf("error deleting backing volume %s", imageData.imageName)
 }
 return nil
 }
@@ -1040,7 +1065,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 app.Labels = map[string]string{"app": "validate-data"}
 app.Namespace = f.UniqueName

-err = createPVCAndApp("", f, pvc, app)
+err = createPVCAndApp("", f, pvc, app, deployTimeout)
 if err != nil {
     return err
 }
@@ -1074,7 +1099,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 }

 func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
-    rbdImage, _, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
     if err != nil {
         return err
     }
@@ -1083,7 +1108,7 @@ func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim
     LabelSelector: "app=rook-ceph-tools",
 }

-cmd := fmt.Sprintf("rbd rm %s --pool=replicapool", rbdImage)
+cmd := fmt.Sprintf("rbd rm %s --pool=replicapool", imageData.imageName)
 execCommandInPod(f, cmd, rookNamespace, &opt)
 return nil
 }
@@ -1161,7 +1186,7 @@ func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFla
 app.Labels = map[string]string{"app": "validate-mount-opt"}
 app.Namespace = f.UniqueName

-err = createPVCAndApp("", f, pvc, app)
+err = createPVCAndApp("", f, pvc, app, deployTimeout)
 if err != nil {
     return err
 }
@@ -1182,3 +1207,138 @@ func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFla
 err = deletePVCAndApp("", f, pvc, app)
 return err
 }
+
+func createNodeLabel(f *framework.Framework, labelKey, labelValue string) {
+    // NOTE: This makes all nodes (in a multi-node setup) in the test take
+    // the same label values, which is fine for the test
+    nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+    Expect(err).Should(BeNil())
+    for i := range nodes.Items {
+        framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
+    }
+}
+
+func deleteNodeLabel(c clientset.Interface, labelKey string) {
+    nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+    Expect(err).Should(BeNil())
+    for i := range nodes.Items {
+        framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
+    }
+}
+
+func checkNodeHasLabel(c clientset.Interface, labelKey, labelValue string) {
+    nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+    Expect(err).Should(BeNil())
+    for i := range nodes.Items {
+        framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
+    }
+}
+
+func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    if err != nil {
+        return err
+    }
+
+    opt := metav1.ListOptions{
+        LabelSelector: "app=rook-ceph-tools",
+    }
+
+    _, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
+    Expect(stdErr).Should(BeEmpty())
+
+    e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
+
+    return nil
+}
+
+func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    if err != nil {
+        return err
+    }
+
+    opt := metav1.ListOptions{
+        LabelSelector: "app=rook-ceph-tools",
+    }
+
+    _, stdErr := execCommandInPod(f, "rados listomapkeys -p "+pool+" csi.volume."+imageData.imageID, rookNamespace, &opt)
+    Expect(stdErr).Should(BeEmpty())
+
+    e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
+
+    return nil
+}
+
+func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+    imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+    if err != nil {
+        return err
+    }
+
+    opt := metav1.ListOptions{
+        LabelSelector: "app=rook-ceph-tools",
+    }
+
+    _, stdErr := execCommandInPod(f, "rados getomapval -p "+pool+" csi.volumes.default csi.volume."+imageData.pvName, rookNamespace, &opt)
+    Expect(stdErr).Should(BeEmpty())
+
+    e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
+
+    return nil
+}
+
+// getBoundPV returns a PV details.
+func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
+    // Get new copy of the claim
+    claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+
+    // Get the bound PV
+    pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
+    return pv, err
+}
+
+func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) {
+    pv, err := getBoundPV(f.ClientSet, pvc)
+    if err != nil {
+        Fail(err.Error())
+    }
+
+    if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
+        Fail("Found empty NodeSelectorTerms in PV")
+    }
+
+    for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
+        rFound := false
+        zFound := false
+
+        switch expression.Key {
+        case nodeCSIRegionLabel:
+            if rFound {
+                Fail("Found multiple occurrences of topology key for region")
+            }
+            rFound = true
+            if expression.Values[0] != regionValue {
+                Fail("Topology value for region label mismatch")
+            }
+        case nodeCSIZoneLabel:
+            if zFound {
+                Fail("Found multiple occurrences of topology key for zone")
+            }
+            zFound = true
+            if expression.Values[0] != zoneValue {
+                Fail("Topology value for zone label mismatch")
+            }
+        default:
+            Fail("Unexpected key in node selector terms found in PV")
+        }
+    }
+}
+
+func addTopologyDomainsToDSYaml(template, labels string) string {
+    return strings.ReplaceAll(template, "# - \"--domainlabels=failure-domain/region,failure-domain/zone\"",
+        "- \"--domainlabels="+labels+"\"")
+}
@@ -81,6 +81,9 @@ K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"BlockVolume=true,CSIBlockVolume=true,Vol
 #extra-config for kube https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/
 EXTRA_CONFIG=${EXTRA_CONFIG:-"--extra-config=apiserver.enable-admission-plugins=PodSecurityPolicy"}

+#extra Rook configuration
+ROOK_BLOCK_POOL_NAME=${ROOK_BLOCK_POOL_NAME:-"newrbdpool"}
+
 case "${1:-}" in
 up)
     install_minikube
@@ -118,6 +121,16 @@ deploy-rook)
     DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
     "$DIR"/rook.sh deploy
     ;;
+create-block-pool)
+    echo "creating a block pool named $ROOK_BLOCK_POOL_NAME"
+    DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+    "$DIR"/rook.sh create-block-pool
+    ;;
+delete-block-pool)
+    echo "deleting block pool named $ROOK_BLOCK_POOL_NAME"
+    DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+    "$DIR"/rook.sh delete-block-pool
+    ;;
 teardown-rook)
     echo "teardown rook"
     DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
@@ -150,9 +163,11 @@ Available Commands:
   clean             Deletes a local kubernetes cluster
   ssh               Log into or run a command on a minikube machine with SSH
   deploy-rook       Deploy rook to minikube
+  create-block-pool Creates a rook block pool (named $ROOK_BLOCK_POOL_NAME)
+  delete-block-pool Deletes a rook block pool (named $ROOK_BLOCK_POOL_NAME)
   teardown-rook     Teardown a rook from minikube
-  cephcsi           copy built docker images to kubernetes cluster
-  k8s-sidecar       copy kubernetes sidecar docker images to kubernetes cluster
+  cephcsi           Copy built docker images to kubernetes cluster
+  k8s-sidecar       Copy kubernetes sidecar docker images to kubernetes cluster
 " >&2
     ;;
 esac

@@ -3,6 +3,7 @@
 ROOK_VERSION=${ROOK_VERSION:-"v1.1.7"}
 ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300}
 ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph"
+ROOK_BLOCK_POOL_NAME=${ROOK_BLOCK_POOL_NAME:-"newrbdpool"}

 function deploy_rook() {
     kubectl create -f "${ROOK_URL}/common.yaml"
@@ -37,6 +38,40 @@ function teardown_rook() {
     kubectl delete -f "${ROOK_URL}/common.yaml"
 }

+function create_block_pool() {
+    curl -o newpool.yaml "${ROOK_URL}/pool-test.yaml"
+    sed -i "s/replicapool/$ROOK_BLOCK_POOL_NAME/g" newpool.yaml
+    kubectl create -f "./newpool.yaml"
+    rm -f "./newpool.yaml"
+
+    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
+        echo "Checking RBD ($ROOK_BLOCK_POOL_NAME) stats... ${retry}s" && sleep 5
+
+        TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
+        TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
+        [[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
+            { echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
+
+        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$ROOK_BLOCK_POOL_NAME" &>/dev/null; then
+            echo "RBD ($ROOK_BLOCK_POOL_NAME) is successfully created..."
+            break
+        fi
+    done
+
+    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
+        echo "[Timeout] Failed to get RBD pool $ROOK_BLOCK_POOL_NAME stats"
+        exit 1
+    fi
+    echo ""
+}
+
+function delete_block_pool() {
+    curl -o newpool.yaml "${ROOK_URL}/pool-test.yaml"
+    sed -i "s/replicapool/$ROOK_BLOCK_POOL_NAME/g" newpool.yaml
+    kubectl delete -f "./newpool.yaml"
+    rm -f "./newpool.yaml"
+}
+
 function check_ceph_cluster_health() {
     for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
         echo "Wait for rook deploy... ${retry}s" && sleep 5
@@ -115,11 +150,19 @@ deploy)
 teardown)
     teardown_rook
     ;;
+create-block-pool)
+    create_block_pool
+    ;;
+delete-block-pool)
+    delete_block_pool
+    ;;
 *)
     echo " $0 [command]
 Available Commands:
   deploy             Deploy a rook
   teardown           Teardown a rook
+  create-block-pool  Create a rook block pool
+  delete-block-pool  Delete a rook block pool
 " >&2
     ;;
 esac

@@ -6,6 +6,7 @@ set -e
 export KUBE_VERSION=$1
 sudo scripts/minikube.sh up
 sudo scripts/minikube.sh deploy-rook
+sudo scripts/minikube.sh create-block-pool
 # pull docker images to speed up e2e
 sudo scripts/minikube.sh cephcsi
 sudo scripts/minikube.sh k8s-sidecar