
Merge pull request from red-hat-storage/sync_us--devel

Syncing latest changes from upstream devel for ceph-csi
This commit is contained in:
openshift-merge-bot[bot]
2024-04-05 08:16:00 +00:00
committed by GitHub
4 changed files with 44 additions and 2 deletions

@@ -5,9 +5,13 @@ This job runs the [Kubernetes end-to-end external storage tests][1] with
 different driver configurations/manifests (in the `driver-*.yaml` files). Each
 driver configuration refers to a StorageClass that is used while testing.
-The StorageClasses are created with the `create-storageclass.sh` script and the
+The StorageClasses are created with the `create-storageclasses.sh` script and the
 `sc-*.yaml.in` templates.
+The VolumeSnapshotClasses are created with the
+`create-volumesnapshotclasses.sh` script and the
+`volumesnapshotclass-*.yaml.in` templates.
 The Ceph-CSI Configuration from the `ceph-csi-config` ConfigMap is created with
 `create-configmap.sh` after the deployment is finished. The ConfigMap is
 referenced in the StorageClasses and contains the connection details for the
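For context on the ConfigMap the README refers to: Ceph-CSI reads its cluster connection details from a `config.json` key holding a JSON list of clusters. The following is only a minimal sketch of that layout, not content from this commit; the fsid and monitor address are placeholders:

cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  config.json: |-
    [
      {
        "clusterID": "<cluster-fsid>",
        "monitors": ["<mon-address>:6789"]
      }
    ]
EOF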

@@ -0,0 +1,27 @@
#!/bin/sh
#
# Create VolumeSnapshotClasses from a template (volumesnapshotclass-*.yaml.in) and replace keywords
# like @@CLUSTER_ID@@.
#
# These VolumeSnapshotClasses can then be used by driver-*.yaml manifests in the
# k8s-e2e-external-storage CI job.
#
# Requirements:
# - kubectl in the path
# - working KUBE_CONFIG either in environment, or default config files
# - deployment done with Rook
#
# exit on error
set -e
WORKDIR=$(dirname "${0}")
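# find the Rook toolbox pod and ask Ceph for the cluster fsid, which becomes the clusterID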
TOOLBOX_POD=$(kubectl -n rook-ceph get pods --no-headers -l app=rook-ceph-tools -o=jsonpath='{.items[0].metadata.name}')
FS_ID=$(kubectl -n rook-ceph exec "${TOOLBOX_POD}" -- ceph fsid)
for sc in "${WORKDIR}"/volumesnapshotclass-*.yaml.in
do
    sed "s/@@CLUSTER_ID@@/${FS_ID}/" "${sc}" |
        kubectl create -f -
done
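A quick way to exercise the new script, assuming the external-snapshotter CRDs are installed and a Rook deployment is running:

./create-volumesnapshotclasses.sh
kubectl get volumesnapshotclasses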

@@ -10,7 +10,7 @@ parameters:
   imageFeatures: layering
   csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
   csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
-  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
   csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
   csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
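This one-line fix corrects a copy-paste error: the RBD StorageClass pointed `controller-expand-secret-name` at the CephFS provisioner secret, so controller-side expansion of RBD volumes would be attempted with the wrong credentials. A hedged sketch for exercising the fixed path, assuming a hypothetical PVC `test-pvc` bound through this StorageClass and `allowVolumeExpansion: true`:

kubectl patch pvc test-pvc -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'
kubectl get pvc test-pvc -w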

@@ -0,0 +1,11 @@
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: k8s-storage-e2e-cephfs
driver: cephfs.csi.ceph.com
parameters:
  clusterID: @@CLUSTER_ID@@
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete
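Once the template is rendered (with `@@CLUSTER_ID@@` replaced by the fsid) and created, a VolumeSnapshot can reference the class by name. A minimal sketch, with `test-snapshot` and `test-pvc` as hypothetical names:

cat <<EOF | kubectl create -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: test-snapshot
spec:
  volumeSnapshotClassName: k8s-storage-e2e-cephfs
  source:
    persistentVolumeClaimName: test-pvc
EOF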