Add support for erasure coded pools

This commit adds support for a dataPool parameter in the topology
constrained pools of the StorageClass, which can be used to specify
erasure coded pool names to be used for RBD data instead of the
replicated pools.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
Authored by ShyamsundarR on 2020-04-06 16:19:13 -04:00, committed by mergify[bot]
parent 3f06fedf61, commit 1a8f8e3c24
11 changed files with 166 additions and 88 deletions
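For context, a minimal sketch (not part of this diff; the pool names, clusterID, and exact topologyConstrainedPools field layout are assumptions to be checked against the ceph-csi StorageClass examples) of how a topology constrained pool can point at an erasure coded data pool:

cat <<'EOF' | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-topology-ec
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <cluster-id>            # placeholder, from the ceph-csi ConfigMap
  topologyConstrainedPools: |
    [{"poolName":"replicapool",
      "dataPool":"ec-datapool",
      "domainSegments":[
        {"domainLabel":"region","value":"testregion"},
        {"domainLabel":"zone","value":"testzone"}]}]
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
EOF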

@@ -12,6 +12,12 @@ RBD_CHART_NAME="ceph-csi-rbd"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
DEPLOY_TIMEOUT=600
# ceph-csi specific variables
NODE_LABEL_REGION="test.failure-domain/region"
NODE_LABEL_ZONE="test.failure-domain/zone"
REGION_VALUE="testregion"
ZONE_VALUE="testzone"
function check_deployment_status() {
LABEL=$1
NAMESPACE=$2
@@ -130,6 +136,13 @@ install_cephcsi_helm_charts() {
if [ -z "$NAMESPACE" ]; then
NAMESPACE="default"
fi
# label the nodes uniformly for domain information
for node in $(kubectl get node -o jsonpath='{.items[*].metadata.name}'); do
kubectl label node/"${node}" ${NODE_LABEL_REGION}=${REGION_VALUE}
kubectl label node/"${node}" ${NODE_LABEL_ZONE}=${ZONE_VALUE}
done
# install ceph-csi-cephfs and ceph-csi-rbd charts
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs --name ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
@@ -139,7 +152,7 @@ install_cephcsi_helm_charts() {
# deleting configmap as a workaround to avoid configmap already present
# issue when installing ceph-csi-rbd
kubectl delete cm ceph-csi-config --namespace ${NAMESPACE}
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}"
check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
@@ -149,6 +162,13 @@ install_cephcsi_helm_charts() {
cleanup_cephcsi_helm_charts() {
"${HELM}" del --purge ${CEPHFS_CHART_NAME}
"${HELM}" del --purge ${RBD_CHART_NAME}
# remove set labels
for node in $(kubectl get node --no-headers | cut -f 1 -d ' '); do
kubectl label node/"$node" test.failure-domain/region-
kubectl label node/"$node" test.failure-domain/zone-
done
# TODO/LATER we could remove the CSI labels that would have been set as well
}
helm_reset() {

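In passing, one way to confirm that the uniform topology labels set (and later removed) by the script above are present on all nodes, using the same label keys:

kubectl get nodes -L test.failure-domain/region -L test.failure-domain/zone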
@@ -25,7 +25,7 @@ function deploy_rook() {
# Check if CephBlockPool is empty
if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
check_rbd_stat
check_rbd_stat ""
fi
}
@@ -44,25 +44,7 @@ function create_block_pool() {
kubectl create -f "./newpool.yaml"
rm -f "./newpool.yaml"
for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
echo "Checking RBD ($ROOK_BLOCK_POOL_NAME) stats... ${retry}s" && sleep 5
TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
[[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
{ echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$ROOK_BLOCK_POOL_NAME" &>/dev/null; then
echo "RBD ($ROOK_BLOCK_POOL_NAME) is successfully created..."
break
fi
done
if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
echo "[Timeout] Failed to get RBD pool $ROOK_BLOCK_POOL_NAME stats"
exit 1
fi
echo ""
check_rbd_stat "$ROOK_BLOCK_POOL_NAME"
}
function delete_block_pool() {
@@ -122,7 +104,11 @@ function check_mds_stat() {
function check_rbd_stat() {
for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
if [ -z "$1" ]; then
RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
else
RBD_POOL_NAME=$1
fi
echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5
TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')

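The script changes above only parameterize check_rbd_stat with a pool name; for reference, the Ceph-side setup that makes an erasure coded pool usable as an RBD data pool (not part of this diff; pool and image names are illustrative) looks roughly like:

# create an EC pool and allow partial overwrites, which RBD requires
ceph osd pool create ec-datapool 64 64 erasure
ceph osd pool set ec-datapool allow_ec_overwrites true
ceph osd pool application enable ec-datapool rbd
# image metadata stays in the replicated pool, data is written to the EC pool
rbd create --size 1G --pool replicapool --data-pool ec-datapool test-image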
@@ -6,6 +6,7 @@ set -e
export KUBE_VERSION=$1
sudo scripts/minikube.sh up
sudo scripts/minikube.sh deploy-rook
sudo scripts/minikube.sh create-block-pool
# pull docker images to speed up e2e
sudo scripts/minikube.sh cephcsi
sudo scripts/minikube.sh k8s-sidecar