ci: add files to run Kubernetes external storage e2e suite

Signed-off-by: Niels de Vos <ndevos@redhat.com>
This commit is contained in:
Niels de Vos 2020-09-24 11:20:36 +02:00 committed by mergify[bot]
parent 27c8318a82
commit fd4328cd53
6 changed files with 216 additions and 0 deletions

View File

@ -0,0 +1,10 @@
# Kubernetes external storage e2e test suite
The files in this directory are used by the k8s-e2e-external-storage CI job.
This job runs the [Kubernetes end-to-end external storage tests][1] with
different driver configurations/manifests (in the `driver-*.yaml` files). Each
driver configuration refers to a StorageClass that is used while testing. The
StorageClasses are created with the `create-storageclass.sh` script and the
`sc-*.yaml.in` templates.
[1]: https://github.com/kubernetes/kubernetes/tree/master/test/e2e/storage/external

View File

@ -0,0 +1,27 @@
#!/bin/sh
#
# Create StorageClasses from a template (sc-*.yaml.in) and replace keywords
# like @@CLUSTER_ID@@.
#
# These StorageClasses can then be used by driver-*.yaml manifests in the
# k8s-e2e-external-storage CI job.
#
# Requirements:
# - kubectl in the path
# - working KUBE_CONFIG either in environment, or default config files
# - deployment done with Rook
#

# exit on error
set -e

WORKDIR=$(dirname "${0}")

# Locate the Rook toolbox pod; ceph commands are executed inside it.
TOOLBOX_POD=$(kubectl -n rook-ceph get pods --no-headers -l app=rook-ceph-tools -o=jsonpath='{.items[0].metadata.name}')

# The Ceph cluster fsid becomes the clusterID in the StorageClasses.
# Pass the remote command after "--" so kubectl does not try to parse it
# as its own options (exec without "--" is deprecated/removed).
FS_ID=$(kubectl -n rook-ceph exec "${TOOLBOX_POD}" -- ceph fsid)

for sc in "${WORKDIR}"/sc-*.yaml.in
do
	# With no matching templates the glob stays literal; skip it instead
	# of letting sed fail under "set -e" with a confusing error.
	[ -e "${sc}" ] || continue
	sed "s/@@CLUSTER_ID@@/${FS_ID}/" "${sc}" | kubectl create -f -
done

View File

@ -0,0 +1,67 @@
---
# DriverDefinition consumed by the Kubernetes external storage e2e suite
# (test/e2e/storage/external) for the CephFS CSI driver.
ShortName: cephcsi-cephfs-test
StorageClass:
  # Use the StorageClass created by create-storageclass.sh from sc-cephfs.yaml.in
  FromExistingClassName: k8s-storage-e2e-cephfs
  # FromFile: sc-cephfs.yaml
SnapshotClass:
  # Must be set to enable snapshotting tests
  FromName: false
DriverInfo:
  # Internal name of the driver, display name in the test case and test objects
  Name: cephfs.csi.ceph.com
  # The range of disk size supported by this driver
  SupportedSizeRange:
    Min: 1Gi
    Max: 16Ti
  # Map of strings for supported mount options
  SupportedMountOption:
    rw: {}
  # Map of strings for required mount options
  RequiredMountOption:
    rw: {}
  # Optional list of access modes required for provisioning. Default is RWO
  # RequiredAccessModes:
  # Map that represents the capabilities the driver supports
  Capabilities:
    # Data is persisted across pod restarts
    persistence: true
    # Volume ownership via fsGroup
    fsGroup: true
    # Raw block mode
    block: false
    # Exec a file in the volume
    exec: true
    # Support for volume limits
    volumeLimits: false
    # Support for volume expansion in controllers
    controllerExpansion: false
    # Support for volume expansion in nodes
    nodeExpansion: false
    # Support volume that can run on single node only (like hostpath)
    singleNodeVolume: false
    # Support ReadWriteMany access modes
    RWX: true
    # Support topology
    topology: false
    # Support populate data from snapshot
    snapshotDataSource: false
    # Support populated data from PVC
    pvcDataSource: false

View File

@ -0,0 +1,72 @@
---
# DriverDefinition consumed by the Kubernetes external storage e2e suite
# (test/e2e/storage/external) for the RBD CSI driver (RWO volumes).
ShortName: cephcsi-rbd-rwo-test
StorageClass:
  # Use the StorageClass created by create-storageclass.sh from sc-rbd.yaml.in
  FromExistingClassName: k8s-storage-e2e-rbd-rwo
  # FromFile: sc-rbd.yaml
SnapshotClass:
  # Must be set to enable snapshotting tests
  FromName: true
DriverInfo:
  # Internal name of the driver, display name in the test case and test objects
  Name: rbd-rwo.csi.ceph.com
  # The range of disk size supported by this driver
  SupportedSizeRange:
    Min: 1Gi
    Max: 16Ti
  # Map of strings for supported FS types
  SupportedFsType:
    ext4: {}
    xfs: {}
  # Map of strings for supported mount options
  SupportedMountOption:
    rw: {}
  # Map of strings for required mount options
  RequiredMountOption:
    rw: {}
  # Optional list of access modes required for provisioning. Default is RWO
  # RequiredAccessModes:
  # Map that represents the capabilities the driver supports
  Capabilities:
    # Data is persisted across pod restarts
    persistence: true
    # Volume ownership via fsGroup
    fsGroup: false
    # Raw block mode
    block: true
    # Exec a file in the volume
    exec: true
    # Support for volume limits
    volumeLimits: false
    # Support for volume expansion in controllers
    controllerExpansion: false
    # Support for volume expansion in nodes
    nodeExpansion: false
    # Support volume that can run on single node only (like hostpath)
    singleNodeVolume: false
    # Support ReadWriteMany access modes
    RWX: false
    # Support topology
    topology: false
    # Support populate data from snapshot
    snapshotDataSource: false
    # Support populated data from PVC
    pvcDataSource: false

View File

@ -0,0 +1,19 @@
---
# StorageClass template for the k8s-e2e-external-storage CI job.
# @@CLUSTER_ID@@ is replaced with the Ceph fsid by create-storageclass.sh.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: k8s-storage-e2e-cephfs
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: @@CLUSTER_ID@@
  # CephFS filesystem name, as created by the Rook deployment
  fsName: myfs
  # Secrets managed by Rook in the rook-ceph namespace
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - debug

View File

@ -0,0 +1,21 @@
---
# StorageClass template for the k8s-e2e-external-storage CI job.
# @@CLUSTER_ID@@ is replaced with the Ceph fsid by create-storageclass.sh.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: k8s-storage-e2e-rbd-rwo
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: @@CLUSTER_ID@@
  # RBD pool created by the Rook deployment
  pool: replicapool
  imageFeatures: layering
  # Secrets managed by Rook in the rook-ceph namespace. The expand secret
  # must be the RBD provisioner secret (was mistakenly set to the CephFS
  # one, which would make controller-side volume expansion fail to
  # authenticate).
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard