Add support for erasure coded pools

This commit adds support for a dataPool parameter in the
topology constrained pools of the StorageClass, which can be
used to specify erasure coded pool names to hold the RBD
data instead of the replica pools.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
ShyamsundarR 2020-04-06 16:19:13 -04:00 committed by mergify[bot]
parent 3f06fedf61
commit 1a8f8e3c24
11 changed files with 166 additions and 88 deletions
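
For reference, here is a hedged sketch (not part of the commit) of what a `topologyConstrainedPools` value carrying the new `dataPool` key looks like when decoded in Go. The field and JSON key names mirror the `TopologyConstrainedPool` struct changed further down; the pool names are made up for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the ceph-csi types so the sketch stays self-contained;
// only the "dataPool" key is new in this commit.
type topologySegment struct {
	DomainLabel string `json:"domainLabel"`
	DomainValue string `json:"value"`
}

type topologyConstrainedPool struct {
	PoolName       string            `json:"poolName"`
	DataPoolName   string            `json:"dataPool"`
	DomainSegments []topologySegment `json:"domainSegments"`
}

func main() {
	// Example StorageClass parameter value (hypothetical pool names).
	raw := `[{"poolName":"pool0","dataPool":"ec-pool0",
	         "domainSegments":[{"domainLabel":"region","value":"east"},
	                           {"domainLabel":"zone","value":"zone1"}]}]`

	var pools []topologyConstrainedPool
	if err := json.Unmarshal([]byte(raw), &pools); err != nil {
		panic(err)
	}
	for _, p := range pools {
		fmt.Printf("RBD images in %q, data objects in %q\n", p.PoolName, p.DataPoolName)
	}
}
```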

View File

@@ -30,7 +30,7 @@ cluster.
the following parameters are available to configure kubernetes cluster
| flag | description |
| ----------------- | ------------------------------------------------------------- |
| up | Starts a local kubernetes cluster and prepares a disk for rook |
| down | Stops a running local kubernetes cluster |
@@ -45,16 +45,16 @@ the following parameters are available to configure kubernetes cluster
following environment variables can be exported to customize kubernetes deployment
| ENV | Description | Default |
|----------------------|--------------------------------------------------|--------------------------------------------------------------------|
| MINIKUBE_VERSION | minikube version to install | latest |
| KUBE_VERSION | kubernetes version to install | v1.14.10 |
| MEMORY | Amount of RAM allocated to the minikube VM in MB | 3000 |
| VM_DRIVER | VM driver to create virtual machine | virtualbox |
| CEPHCSI_IMAGE_REPO | Repo URL to pull cephcsi images | quay.io/cephcsi |
| K8S_IMAGE_REPO | Repo URL to pull kubernetes sidecar images | quay.io/k8scsi |
| K8S_FEATURE_GATES | Feature gates to enable on kubernetes cluster | BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true |
| ROOK_BLOCK_POOL_NAME | Block pool name to create in the rook instance | newrbdpool |
- creating kubernetes cluster

View File

@@ -23,13 +23,14 @@ var (
rbdDeploymentName = "csi-rbdplugin-provisioner"
rbdDaemonsetName = "csi-rbdplugin"
// Topology related variables
nodeRegionLabel = "test.failure-domain/region"
regionValue = "testregion"
nodeZoneLabel = "test.failure-domain/zone"
zoneValue = "testzone"
nodeCSIRegionLabel = "topology.rbd.csi.ceph.com/region"
nodeCSIZoneLabel = "topology.rbd.csi.ceph.com/zone"
rbdTopologyPool = "newrbdpool"
rbdTopologyDataPool = "replicapool" // NOTE: should be different than rbdTopologyPool for test to be effective
)
func deployRBDPlugin() {
@@ -125,9 +126,9 @@ var _ = Describe("RBD", func() {
// deploy RBD CSI
BeforeEach(func() {
c = f.ClientSet
createNodeLabel(f, nodeRegionLabel, regionValue)
createNodeLabel(f, nodeZoneLabel, zoneValue)
if deployRBD {
createNodeLabel(f, nodeRegionLabel, regionValue)
createNodeLabel(f, nodeZoneLabel, zoneValue)
if cephCSINamespace != defaultNs {
err := createNamespace(c, cephCSINamespace)
if err != nil {
@@ -519,11 +520,44 @@ var _ = Describe("RBD", func() {
Fail(err.Error())
}
// cleanup and undo changes made by the test
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
Fail(err.Error())
}
By("checking if data pool parameter is honored", func() {
deleteResource(rbdExamplePath + "storageclass.yaml")
topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool +
"\",\"domainSegments\":" +
"[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," +
"{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]"
createRBDStorageClass(f.ClientSet, f,
map[string]string{"volumeBindingMode": "WaitForFirstConsumer"},
map[string]string{"topologyConstrainedPools": topologyConstraint})
By("creating an app using a PV from the delayed binding mode StorageClass with a data pool")
pvc, app = createPVCAndAppBinding(pvcPath, appPath, f, 0)
By("ensuring created PV has its image in the topology specific pool")
err = checkPVCImageInPool(f, pvc, rbdTopologyPool)
if err != nil {
Fail(err.Error())
}
By("ensuring created image has the right data pool parameter set")
err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool)
if err != nil {
Fail(err.Error())
}
// cleanup and undo changes made by the test
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
Fail(err.Error())
}
})
// cleanup and undo changes made by the test
deleteResource(rbdExamplePath + "storageclass.yaml")
createRBDStorageClass(f.ClientSet, f, nil, nil)
})
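
As a side note on the test above (not something this commit changes): the topologyConstraint string built by hand with escaped quotes could also be produced with encoding/json, which keeps the e2e constraint easier to read. A minimal sketch, reusing the pool and domain values from the test:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type segment struct {
	DomainLabel string `json:"domainLabel"`
	Value       string `json:"value"`
}

type constrainedPool struct {
	PoolName string    `json:"poolName"`
	DataPool string    `json:"dataPool,omitempty"`
	Segments []segment `json:"domainSegments"`
}

func main() {
	constraint, err := json.Marshal([]constrainedPool{{
		PoolName: "newrbdpool",
		DataPool: "replicapool",
		Segments: []segment{
			{DomainLabel: "region", Value: "testregion"},
			{DomainLabel: "zone", Value: "testzone"},
		},
	}})
	if err != nil {
		panic(err)
	}
	// Value for the "topologyConstrainedPools" StorageClass parameter.
	fmt.Println(string(constraint))
}
```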

View File

@@ -1234,21 +1234,40 @@ func checkNodeHasLabel(c clientset.Interface, labelKey, labelValue string) {
}
}
func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
if err != nil {
return err
return "", err
}
opt := metav1.ListOptions{
LabelSelector: "app=rook-ceph-tools",
}
_, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
stdOut, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
Expect(stdErr).Should(BeEmpty())
e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
return stdOut, nil
}
func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
_, err := getPVCImageInfoInPool(f, pvc, pool)
return err
}
func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
if err != nil {
return err
}
if !strings.Contains(stdOut, "data_pool: "+dataPool) {
return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
}
return nil
}
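
The new checkPVCDataPoolForImageInPool helper assumes that `rbd info` reports a `data_pool:` line for an image whose data objects live in a separate pool. The snippet below is illustrative only (the output text is assumed, not captured from a cluster) and simply shows the substring the helper looks for:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed, abbreviated `rbd info` output for an image created with a
	// separate (e.g. erasure coded) data pool.
	stdOut := `rbd image 'csi-vol-example':
	size 1 GiB in 256 objects
	order 22 (4 MiB objects)
	data_pool: replicapool
	block_name_prefix: rbd_data.2.abcdef`

	dataPool := "replicapool"
	if strings.Contains(stdOut, "data_pool: "+dataPool) {
		fmt.Println("image reports the expected data pool")
	}
}
```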

View File

@@ -58,18 +58,21 @@ parameters:
# For further information read TODO<doc>
# topologyConstrainedPools: |
# [{"poolName":"pool0",
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone1"}]},
# "dataPool":"ec-pool0" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone1"}]},
# {"poolName":"pool1",
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone2"}]},
# "dataPool":"ec-pool1" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone2"}]},
# {"poolName":"pool2",
# "domainSegments":[
# {"domainLabel":"region","value":"west"},
# {"domainLabel":"zone","value":"zone1"}]}
# ]
# "dataPool":"ec-pool2" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"west"},
# {"domainLabel":"zone","value":"zone1"}]}
# ]
reclaimPolicy: Delete
allowVolumeExpansion: true

View File

@@ -110,7 +110,7 @@ func undoVolReservation(ctx context.Context, volOptions *volumeOptions, vid volu
func updateTopologyConstraints(volOpts *volumeOptions) error {
// update request based on topology constrained parameters (if present)
poolName, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
poolName, _, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
if err != nil {
return err
}

View File

@@ -282,12 +282,13 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
return nil
}
// update request based on topology constrained parameters (if present)
poolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
poolName, dataPoolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
if err != nil {
return err
}
if poolName != "" {
rbdVol.Pool = poolName
rbdVol.DataPool = dataPoolName
rbdVol.Topology = topology
}
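
With rbdVol.DataPool now populated from the matched topology pool, the volume creation path can place image data in the erasure coded pool. The sketch below is not ceph-csi's implementation; it only illustrates the RBD-level knob a data pool corresponds to (the `--data-pool` option of `rbd create`), with hypothetical pool and image names:

```go
package main

import "fmt"

// createImageArgs assembles an `rbd create` invocation; when dataPool is
// set, the image's data objects go to that (erasure coded) pool while the
// image metadata stays in pool.
func createImageArgs(pool, dataPool, image string, sizeMiB uint64) []string {
	args := []string{"create", "--size", fmt.Sprintf("%d", sizeMiB), pool + "/" + image}
	if dataPool != "" {
		args = append(args, "--data-pool", dataPool)
	}
	return args
}

func main() {
	fmt.Println(append([]string{"rbd"}, createImageArgs("pool0", "ec-pool0", "csi-vol-example", 1024)...))
}
```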

View File

@@ -125,6 +125,7 @@ type topologySegment struct {
// TopologyConstrainedPool stores the pool name and a list of its associated topology domain values
type TopologyConstrainedPool struct {
PoolName string `json:"poolName"`
DataPoolName string `json:"dataPool"`
DomainSegments []topologySegment `json:"domainSegments"`
}
@@ -178,7 +179,7 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
topologyPools, poolName)
}
_, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements)
_, _, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements)
return topology, err
}
@@ -188,28 +189,28 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
// The return variables are the image pool name, the data pool name, and the
// topology map of the matched requirement
func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
accessibilityRequirements *csi.TopologyRequirement) (string, map[string]string, error) {
accessibilityRequirements *csi.TopologyRequirement) (string, string, map[string]string, error) {
if topologyPools == nil || accessibilityRequirements == nil {
return "", nil, nil
return "", "", nil, nil
}
// select pool that fits first topology constraint preferred requirements
for _, topology := range accessibilityRequirements.GetPreferred() {
poolName := matchPoolToTopology(topologyPools, topology)
if poolName != "" {
return poolName, topology.GetSegments(), nil
topologyPool := matchPoolToTopology(topologyPools, topology)
if topologyPool.PoolName != "" {
return topologyPool.PoolName, topologyPool.DataPoolName, topology.GetSegments(), nil
}
}
// If preferred mismatches, check requisite for a fit
for _, topology := range accessibilityRequirements.GetRequisite() {
poolName := matchPoolToTopology(topologyPools, topology)
if poolName != "" {
return poolName, topology.GetSegments(), nil
topologyPool := matchPoolToTopology(topologyPools, topology)
if topologyPool.PoolName != "" {
return topologyPool.PoolName, topologyPool.DataPoolName, topology.GetSegments(), nil
}
}
return "", nil, fmt.Errorf("none of the topology constrained pools matched requested "+
return "", "", nil, fmt.Errorf("none of the topology constrained pools matched requested "+
"topology constraints : pools (%+v) requested topology (%+v)",
*topologyPools, *accessibilityRequirements)
}
@@ -217,7 +218,7 @@ func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
// matchPoolToTopology loops through passed in pools, and for each pool checks if all
// requested topology segments are present and match the request, returning the first pool
// that matches (or an empty TopologyConstrainedPool if none match)
func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi.Topology) string {
func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi.Topology) TopologyConstrainedPool {
domainMap := extractDomainsFromlabels(topology)
// check if any pool matches all the domain keys and values
@@ -235,10 +236,10 @@ func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi
continue
}
return topologyPool.PoolName
return topologyPool
}
return ""
return TopologyConstrainedPool{}
}
// extractDomainsFromlabels returns the domain name map, from passed in domain segments,
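
To summarize the change to the topology helpers above: the matched TopologyConstrainedPool (rather than just its name) is now carried through the selection, so the data pool travels with the chosen pool. A much-simplified, self-contained sketch of that selection idea follows; it is not the ceph-csi code, and it assumes CSI segment keys of the form "<prefix>/<domainLabel>" (for example topology.rbd.csi.ceph.com/zone):

```go
package main

import (
	"fmt"
	"strings"
)

type segment struct{ DomainLabel, Value string }

type pool struct {
	PoolName, DataPoolName string
	DomainSegments         []segment
}

// pick returns the pool and data pool of the first entry whose domain
// segments all match the requested topology; a pool with fewer (or no)
// segments acts as a superset and can still be selected, mirroring the
// behavior exercised by the unit tests that follow.
func pick(pools []pool, csiSegments map[string]string) (string, string, bool) {
	domains := map[string]string{}
	for k, v := range csiSegments {
		parts := strings.Split(k, "/")
		domains[parts[len(parts)-1]] = v
	}
	for _, p := range pools {
		matched := true
		for _, s := range p.DomainSegments {
			if domains[s.DomainLabel] != s.Value {
				matched = false
				break
			}
		}
		if matched {
			return p.PoolName, p.DataPoolName, true
		}
	}
	return "", "", false
}

func main() {
	pools := []pool{{
		PoolName: "pool0", DataPoolName: "ec-pool0",
		DomainSegments: []segment{{"region", "east"}, {"zone", "zone1"}},
	}}
	fmt.Println(pick(pools, map[string]string{
		"topology.rbd.csi.ceph.com/region": "east",
		"topology.rbd.csi.ceph.com/zone":   "zone1",
	}))
}
```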

View File

@@ -224,91 +224,91 @@ func TestFindPoolAndTopology(t *testing.T) {
return nil
}
// Test nil values
_, _, err = FindPoolAndTopology(nil, nil)
_, _, _, err = FindPoolAndTopology(nil, nil)
if err != nil {
t.Errorf("expected success due to nil in-args (%v)", err)
}
poolName, _, err := FindPoolAndTopology(&validMultipleTopoPools, nil)
poolName, _, _, err := FindPoolAndTopology(&validMultipleTopoPools, nil)
if err != nil || poolName != "" {
t.Errorf("expected success due to nil accessibility requirements (err - %v) (poolName - %s)", err, poolName)
}
poolName, _, err = FindPoolAndTopology(nil, &validAccReq)
poolName, _, _, err = FindPoolAndTopology(nil, &validAccReq)
if err != nil || poolName != "" {
t.Errorf("expected success due to nil topology pools (err - %v) (poolName - %s)", err, poolName)
}
// Test valid accessibility requirement, with invalid topology pools values
_, _, err = FindPoolAndTopology(&emptyTopoPools, &validAccReq)
_, _, _, err = FindPoolAndTopology(&emptyTopoPools, &validAccReq)
if err == nil {
t.Errorf("expected failure due to empty topology pools")
}
_, _, err = FindPoolAndTopology(&emptyPoolNameTopoPools, &validAccReq)
_, _, _, err = FindPoolAndTopology(&emptyPoolNameTopoPools, &validAccReq)
if err == nil {
t.Errorf("expected failure due to missing pool name in topology pools")
}
_, _, err = FindPoolAndTopology(&differentDomainsInTopoPools, &validAccReq)
_, _, _, err = FindPoolAndTopology(&differentDomainsInTopoPools, &validAccReq)
if err == nil {
t.Errorf("expected failure due to mismatching domains in topology pools")
}
// Test valid topology pools, with invalid accessibility requirements
_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &emptyAccReq)
_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &emptyAccReq)
if err == nil {
t.Errorf("expected failure due to empty accessibility requirements")
}
_, _, err = FindPoolAndTopology(&validSingletonTopoPools, &emptySegmentAccReq)
_, _, _, err = FindPoolAndTopology(&validSingletonTopoPools, &emptySegmentAccReq)
if err == nil {
t.Errorf("expected failure due to empty segments in accessibility requirements")
}
_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialHigherSegmentAccReq)
_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialHigherSegmentAccReq)
if err == nil {
t.Errorf("expected failure due to partial segments in accessibility requirements")
}
_, _, err = FindPoolAndTopology(&validSingletonTopoPools, &partialLowerSegmentAccReq)
_, _, _, err = FindPoolAndTopology(&validSingletonTopoPools, &partialLowerSegmentAccReq)
if err == nil {
t.Errorf("expected failure due to partial segments in accessibility requirements")
}
_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialLowerSegmentAccReq)
_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialLowerSegmentAccReq)
if err == nil {
t.Errorf("expected failure due to partial segments in accessibility requirements")
}
_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &differentSegmentAccReq)
_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &differentSegmentAccReq)
if err == nil {
t.Errorf("expected failure due to mismatching segments in accessibility requirements")
}
// Test success cases
// If a pool is a superset of domains (either empty domain labels or partial), it can be selected
poolName, topoSegment, err := FindPoolAndTopology(&emptyDomainsInTopoPools, &validAccReq)
poolName, _, topoSegment, err := FindPoolAndTopology(&emptyDomainsInTopoPools, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
}
poolName, topoSegment, err = FindPoolAndTopology(&partialDomainsInTopoPools, &validAccReq)
poolName, _, topoSegment, err = FindPoolAndTopology(&partialDomainsInTopoPools, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
}
// match in a singleton topology pools
poolName, topoSegment, err = FindPoolAndTopology(&validSingletonTopoPools, &validAccReq)
poolName, _, topoSegment, err = FindPoolAndTopology(&validSingletonTopoPools, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
}
// match first in multiple topology pools
poolName, topoSegment, err = FindPoolAndTopology(&validMultipleTopoPools, &validAccReq)
poolName, _, topoSegment, err = FindPoolAndTopology(&validMultipleTopoPools, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
@@ -317,12 +317,25 @@ func TestFindPoolAndTopology(t *testing.T) {
// match non-first in multiple topology pools
switchPoolOrder := []TopologyConstrainedPool{}
switchPoolOrder = append(switchPoolOrder, validMultipleTopoPools[1], validMultipleTopoPools[0])
poolName, topoSegment, err = FindPoolAndTopology(&switchPoolOrder, &validAccReq)
poolName, _, topoSegment, err = FindPoolAndTopology(&switchPoolOrder, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
}
// test valid dataPool return
for i := range switchPoolOrder {
switchPoolOrder[i].DataPoolName = "ec-" + switchPoolOrder[i].PoolName
}
poolName, dataPoolName, topoSegment, err := FindPoolAndTopology(&switchPoolOrder, &validAccReq)
err = checkOutput(err, poolName, topoSegment)
if err != nil {
t.Errorf("expected success got: (%v)", err)
}
if dataPoolName != "ec-"+poolName {
t.Errorf("expected data pool to be named ec-%s, got %s", poolName, dataPoolName)
}
// TEST: MatchTopologyForPool
// check for non-existent pool
_, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1+"fuzz")

View File

@@ -12,6 +12,12 @@ RBD_CHART_NAME="ceph-csi-rbd"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
DEPLOY_TIMEOUT=600
# ceph-csi specific variables
NODE_LABEL_REGION="test.failure-domain/region"
NODE_LABEL_ZONE="test.failure-domain/zone"
REGION_VALUE="testregion"
ZONE_VALUE="testzone"
function check_deployment_status() {
LABEL=$1
NAMESPACE=$2
@@ -130,6 +136,13 @@ install_cephcsi_helm_charts() {
if [ -z "$NAMESPACE" ]; then
NAMESPACE="default"
fi
# label the nodes uniformly for domain information
for node in $(kubectl get node -o jsonpath='{.items[*].metadata.name}'); do
kubectl label node/"${node}" ${NODE_LABEL_REGION}=${REGION_VALUE}
kubectl label node/"${node}" ${NODE_LABEL_ZONE}=${ZONE_VALUE}
done
# install ceph-csi-cephfs and ceph-csi-rbd charts
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs --name ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
@@ -139,7 +152,7 @@ install_cephcsi_helm_charts() {
# deleting configmap as a workaround to avoid configmap already present
# issue when installing ceph-csi-rbd
kubectl delete cm ceph-csi-config --namespace ${NAMESPACE}
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}"
check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
@@ -149,6 +162,13 @@ install_cephcsi_helm_charts() {
cleanup_cephcsi_helm_charts() {
"${HELM}" del --purge ${CEPHFS_CHART_NAME}
"${HELM}" del --purge ${RBD_CHART_NAME}
# remove set labels
for node in $(kubectl get node --no-headers | cut -f 1 -d ' '); do
kubectl label node/"$node" test.failure-domain/region-
kubectl label node/"$node" test.failure-domain/zone-
done
# TODO/LATER we could remove the CSI labels that would have been set as well
}
helm_reset() {

View File

@@ -25,7 +25,7 @@ function deploy_rook() {
# Check if CephBlockPool is empty
if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
check_rbd_stat
check_rbd_stat ""
fi
}
@@ -44,25 +44,7 @@ function create_block_pool() {
kubectl create -f "./newpool.yaml"
rm -f "./newpool.yaml"
for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
echo "Checking RBD ($ROOK_BLOCK_POOL_NAME) stats... ${retry}s" && sleep 5
TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
[[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
{ echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$ROOK_BLOCK_POOL_NAME" &>/dev/null; then
echo "RBD ($ROOK_BLOCK_POOL_NAME) is successfully created..."
break
fi
done
if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
echo "[Timeout] Failed to get RBD pool $ROOK_BLOCK_POOL_NAME stats"
exit 1
fi
echo ""
check_rbd_stat "$ROOK_BLOCK_POOL_NAME"
}
function delete_block_pool() {
@@ -122,7 +104,11 @@ function check_mds_stat() {
function check_rbd_stat() {
for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
if [ -z "$1" ]; then
RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
else
RBD_POOL_NAME=$1
fi
echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5
TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')

View File

@@ -6,6 +6,7 @@ set -e
export KUBE_VERSION=$1
sudo scripts/minikube.sh up
sudo scripts/minikube.sh deploy-rook
sudo scripts/minikube.sh create-block-pool
# pull docker images to speed up e2e
sudo scripts/minikube.sh cephcsi
sudo scripts/minikube.sh k8s-sidecar