Add support for erasure coded pools

This commit adds support for specifying a dataPool parameter for the topology constrained pools in the StorageClass, which can be used to name erasure coded pools to use for RBD data instead of the replica pools.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
committed by mergify[bot]
parent 3f06fedf61
commit 1a8f8e3c24
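As a rough sketch of what the new dataPool field looks like inside the topologyConstrainedPools StorageClass parameter, the snippet below builds such a value in Go. The field names (poolName, dataPool, domainSegments, domainLabel, value) are taken from the e2e test in this commit; the pool names and domain values are illustrative only.

```go
// Sketch only: build a topologyConstrainedPools value that includes a dataPool.
// "ec-datapool" is a hypothetical erasure coded pool; adjust names to your cluster.
package main

import (
	"encoding/json"
	"fmt"
)

type domainSegment struct {
	DomainLabel string `json:"domainLabel"`
	Value       string `json:"value"`
}

type topologyConstrainedPool struct {
	PoolName       string          `json:"poolName"`
	DataPool       string          `json:"dataPool"` // pool that holds the RBD data objects
	DomainSegments []domainSegment `json:"domainSegments"`
}

func main() {
	pools := []topologyConstrainedPool{
		{
			PoolName: "newrbdpool",
			DataPool: "ec-datapool",
			DomainSegments: []domainSegment{
				{DomainLabel: "region", Value: "testregion"},
				{DomainLabel: "zone", Value: "testzone"},
			},
		},
	}

	out, err := json.Marshal(pools)
	if err != nil {
		panic(err)
	}
	// The resulting JSON string is what goes into the StorageClass parameter
	// "topologyConstrainedPools".
	fmt.Println(string(out))
}
```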
@@ -30,7 +30,7 @@ cluster.

the following parameters are available to configure kubernetes cluster

| flag              | description                                                     |
| ----------------- | --------------------------------------------------------------- |
| up                | Starts a local kubernetes cluster and prepare a disk for rook    |
| down              | Stops a running local kubernetes cluster                         |
@@ -45,16 +45,16 @@ the following parameters are available to configure kubernetes cluster

following environment variables can be exported to customize kubernetes deployment

| ENV                  | Description                                       | Default                                                              |
|----------------------|---------------------------------------------------|----------------------------------------------------------------------|
| MINIKUBE_VERSION     | minikube version to install                       | latest                                                               |
| KUBE_VERSION         | kubernetes version to install                     | v1.14.10                                                             |
| MEMORY               | Amount of RAM allocated to the minikube VM in MB  | 3000                                                                 |
| VM_DRIVER            | VM driver to create virtual machine               | virtualbox                                                           |
| CEPHCSI_IMAGE_REPO   | Repo URL to pull cephcsi images                   | quay.io/cephcsi                                                      |
| K8S_IMAGE_REPO       | Repo URL to pull kubernetes sidecar images        | quay.io/k8scsi                                                       |
| K8S_FEATURE_GATES    | Feature gates to enable on kubernetes cluster     | BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true   |
| ROOK_BLOCK_POOL_NAME | Block pool name to create in the rook instance    | newrbdpool                                                           |

- creating kubernetes cluster
e2e/rbd.go
@@ -23,13 +23,14 @@ var (
 	rbdDeploymentName = "csi-rbdplugin-provisioner"
 	rbdDaemonsetName  = "csi-rbdplugin"
 	// Topology related variables
-	nodeRegionLabel    = "test.failure-domain/region"
-	regionValue        = "testregion"
-	nodeZoneLabel      = "test.failure-domain/zone"
-	zoneValue          = "testzone"
-	nodeCSIRegionLabel = "topology.rbd.csi.ceph.com/region"
-	nodeCSIZoneLabel   = "topology.rbd.csi.ceph.com/zone"
-	rbdTopologyPool    = "newrbdpool"
+	nodeRegionLabel     = "test.failure-domain/region"
+	regionValue         = "testregion"
+	nodeZoneLabel       = "test.failure-domain/zone"
+	zoneValue           = "testzone"
+	nodeCSIRegionLabel  = "topology.rbd.csi.ceph.com/region"
+	nodeCSIZoneLabel    = "topology.rbd.csi.ceph.com/zone"
+	rbdTopologyPool     = "newrbdpool"
+	rbdTopologyDataPool = "replicapool" // NOTE: should be different than rbdTopologyPool for test to be effective
 )
 
 func deployRBDPlugin() {
@@ -125,9 +126,9 @@ var _ = Describe("RBD", func() {
 	// deploy RBD CSI
 	BeforeEach(func() {
 		c = f.ClientSet
-		createNodeLabel(f, nodeRegionLabel, regionValue)
-		createNodeLabel(f, nodeZoneLabel, zoneValue)
 		if deployRBD {
+			createNodeLabel(f, nodeRegionLabel, regionValue)
+			createNodeLabel(f, nodeZoneLabel, zoneValue)
 			if cephCSINamespace != defaultNs {
 				err := createNamespace(c, cephCSINamespace)
 				if err != nil {
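The labels used above are plain Kubernetes node labels. The suite applies them through its createNodeLabel helper, whose implementation is not part of this diff; the snippet below is only a minimal client-go sketch of what applying such a label involves, with an assumed node name.

```go
// Hypothetical, minimal sketch of labeling a node with a topology label using
// client-go. The e2e suite's own createNodeLabel helper may differ.
package main

import (
	"context"
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func labelNode(c kubernetes.Interface, nodeName, key, value string) error {
	// Strategic merge patch that only touches metadata.labels.
	patch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{key: value},
		},
	})
	if err != nil {
		return err
	}
	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	// "minikube" is an illustrative node name.
	if err := labelNode(client, "minikube", "test.failure-domain/region", "testregion"); err != nil {
		panic(err)
	}
	fmt.Println("node labeled")
}
```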
@@ -519,11 +520,44 @@ var _ = Describe("RBD", func() {
 				Fail(err.Error())
 			}
 
 			// cleanup and undo changes made by the test
 			err = deletePVCAndApp("", f, pvc, app)
 			if err != nil {
 				Fail(err.Error())
 			}
 
+			By("checking if data pool parameter is honored", func() {
+				deleteResource(rbdExamplePath + "storageclass.yaml")
+				topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool +
+					"\",\"domainSegments\":" +
+					"[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," +
+					"{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]"
+				createRBDStorageClass(f.ClientSet, f,
+					map[string]string{"volumeBindingMode": "WaitForFirstConsumer"},
+					map[string]string{"topologyConstrainedPools": topologyConstraint})
+
+				By("creating an app using a PV from the delayed binding mode StorageClass with a data pool")
+				pvc, app = createPVCAndAppBinding(pvcPath, appPath, f, 0)
+
+				By("ensuring created PV has its image in the topology specific pool")
+				err = checkPVCImageInPool(f, pvc, rbdTopologyPool)
+				if err != nil {
+					Fail(err.Error())
+				}
+
+				By("ensuring created image has the right data pool parameter set")
+				err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool)
+				if err != nil {
+					Fail(err.Error())
+				}
+
+				// cleanup and undo changes made by the test
+				err = deletePVCAndApp("", f, pvc, app)
+				if err != nil {
+					Fail(err.Error())
+				}
+			})
+
 			// cleanup and undo changes made by the test
 			deleteResource(rbdExamplePath + "storageclass.yaml")
 			createRBDStorageClass(f.ClientSet, f, nil, nil)
 		})
e2e/utils.go
@@ -1234,21 +1234,40 @@ func checkNodeHasLabel(c clientset.Interface, labelKey, labelValue string) {
 	}
 }
 
-func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
 	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
 	if err != nil {
-		return err
+		return "", err
 	}
 
 	opt := metav1.ListOptions{
 		LabelSelector: "app=rook-ceph-tools",
 	}
 
-	_, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
+	stdOut, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
 	Expect(stdErr).Should(BeEmpty())
 
 	e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
 
-	return nil
+	return stdOut, nil
+}
+
+func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+	_, err := getPVCImageInfoInPool(f, pvc, pool)
+
+	return err
+}
+
+func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
+	stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
+	if err != nil {
+		return err
+	}
+
+	if !strings.Contains(stdOut, "data_pool: "+dataPool) {
+		return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
+	}
+
+	return nil
 }
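checkPVCDataPoolForImageInPool assumes the `rbd info` listing contains a `data_pool:` field when the image has a separate data pool. The snippet below is a standalone sketch of that string match against illustrative output; the real command's formatting may differ.

```go
// Standalone sketch of the string match performed by checkPVCDataPoolForImageInPool.
// The sample "rbd info" output below is illustrative only.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative output for an image whose data objects live in a separate
	// (for example, erasure coded) data pool.
	stdOut := `rbd image 'csi-vol-0001':
	size 1 GiB in 256 objects
	order 22 (4 MiB objects)
	data_pool: replicapool
	block_name_prefix: rbd_data.1.abcdef`

	dataPool := "replicapool"
	if strings.Contains(stdOut, "data_pool: "+dataPool) {
		fmt.Println("image uses the expected data pool")
	} else {
		fmt.Printf("missing data pool value in image info, got info (%s)\n", stdOut)
	}
}
```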