Commit e8ea0aa713 (parent e2910f1c18): added examples

examples/README.md (new file, 17 lines)

## How to test RBD and CephFS plugins with Kubernetes 1.11

Both the `rbd` and `cephfs` directories contain `plugin-deploy.sh` and `plugin-teardown.sh` helper scripts. You can use them to deploy and tear down the RBAC objects, sidecar containers and the plugin in one go. By default, they look for the YAML manifests in `../../deploy/{rbd,cephfs}/kubernetes`. You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.

Once the plugin is successfully deployed, you will need to customize the `storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster setup. Please consult the documentation for information about the available parameters.
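
The values under `data:` in `secret.yaml` must be base64-encoded. A minimal sketch of producing them, assuming access to a Ceph admin keyring (`client.foo` stands in for whatever existing user you want to use):

```bash
# ID values are just the base64-encoded user name
$ echo -n "admin" | base64
# key values are the base64-encoded Ceph key of that user
$ ceph auth get-key client.admin | base64
$ ceph auth get-key client.foo | base64
```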

After configuring the secrets, monitors, etc., you can deploy a testing Pod mounting an RBD image / CephFS volume:

```bash
$ kubectl create -f secret.yaml
$ kubectl create -f storageclass.yaml
$ kubectl create -f pvc.yaml
$ kubectl create -f pod.yaml
```
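
To check that provisioning worked, you can verify that the PVC is bound and that the volume is mounted in the Pod. A quick sketch using the object names from the CephFS example manifests below:

```bash
$ kubectl get pvc csi-cephfs-pvc
$ kubectl describe pod csicephfs-demo-pod
$ kubectl exec -it csicephfs-demo-pod -- df -h /var/lib/www
```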

Other helper scripts (usage sketch below):

* `logs.sh` follows the log output of the plugin container
* `exec-bash.sh` opens a bash shell inside the plugin's container
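
For example, a typical CephFS session might look like this (the RBD scripts work the same way from `examples/rbd`):

```bash
$ cd examples/cephfs
$ ./plugin-deploy.sh       # deploy RBAC objects, sidecars and the plugin
$ ./logs.sh                # follow the plugin container's logs
$ ./exec-bash.sh           # open a shell inside the plugin container
$ ./plugin-teardown.sh     # remove everything again
```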

examples/cephfs/deployment.yaml (new file, 27 lines)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: csicephfs-demo-depl
  labels:
    app: web-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-server
  template:
    metadata:
      labels:
        app: web-server
    spec:
      containers:
        - name: web-server
          image: nginx
          volumeMounts:
            - name: mypvc
              mountPath: /var/lib/www/html
      volumes:
        - name: mypvc
          persistentVolumeClaim:
            claimName: csi-cephfs-pvc
            readOnly: false

examples/cephfs/exec-bash.sh (new executable file, 15 lines)

#!/bin/bash

CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
    echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
}

while [[ "$(get_pod_status)" != "Running" ]]; do
    sleep 1
    echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash

examples/cephfs/logs.sh (new executable file, 15 lines)

#!/bin/bash

CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
    echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
}

while [[ "$(get_pod_status)" != "Running" ]]; do
    sleep 1
    echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl logs -f $POD_NAME -c $CONTAINER_NAME

examples/cephfs/plugin-deploy.sh (new executable file, 15 lines)

#!/bin/bash

deployment_base="${1}"

if [[ -z $deployment_base ]]; then
    deployment_base="../../deploy/cephfs/kubernetes"
fi

cd "$deployment_base" || exit 1

objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin)

for obj in ${objects[@]}; do
    kubectl create -f "./$obj.yaml"
done

examples/cephfs/plugin-teardown.sh (new executable file, 15 lines)

#!/bin/bash

deployment_base="${1}"

if [[ -z $deployment_base ]]; then
    deployment_base="../../deploy/cephfs/kubernetes"
fi

cd "$deployment_base" || exit 1

objects=(csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)

for obj in ${objects[@]}; do
    kubectl delete -f "./$obj.yaml"
done

examples/cephfs/pod.yaml (new file, 17 lines)

apiVersion: v1
kind: Pod
metadata:
  name: csicephfs-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false

examples/cephfs/pvc.yaml (new file, 11 lines)

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: csi-cephfs

examples/cephfs/secret.yaml (new file, 13 lines)

apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: default
data:
  # Required if provisionVolume is set to false
  userID: BASE64-ENCODED-VALUE
  userKey: BASE64-ENCODED-VALUE

  # Required if provisionVolume is set to true
  adminID: BASE64-ENCODED-VALUE
  adminKey: BASE64-ENCODED-VALUE
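
If you would rather not base64-encode the values by hand, an equivalent secret can be created directly from the Ceph admin credentials. A sketch for the provisioned (provisionVolume: "true") case, assuming `client.admin` exists and kubectl is configured:

```bash
kubectl create secret generic csi-cephfs-secret --namespace=default \
  --from-literal=adminID=admin \
  --from-literal=adminKey="$(ceph auth get-key client.admin)"
```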

examples/cephfs/storageclass.yaml (new file, 33 lines)

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs
provisioner: csi-cephfsplugin
parameters:
  # Comma-separated list of Ceph monitors
  monitors: mon1:port,mon2:port,...

  # If set to true, a new volume will be created along with a RADOS user - this requires admin access.
  # If set to false, it is assumed the volume already exists and the user is expected to provide
  # a rootPath to a CephFS volume and user credentials.
  provisionVolume: "true"

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: cephfs_data

  # Root path of an existing CephFS volume
  # Required for provisionVolume: "false"
  # rootPath: /absolute/path

  # The secrets have to contain user and/or Ceph admin credentials.
  csiProvisionerSecretName: csi-cephfs-secret
  csiProvisionerSecretNamespace: default
  csiNodeStageSecretName: csi-cephfs-secret
  csiNodeStageSecretNamespace: default

  # (optional) The driver can use either ceph-fuse (fuse) or the Ceph kernel client (kernel).
  # If left out, the default volume mounter is used; this is determined by probing for ceph-fuse,
  # or by setting the default mounter explicitly via the --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete
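
The `monitors` parameter expects the addresses of your Ceph monitors. A rough sketch of looking them up on a node with cluster access:

```bash
# Each monitor address (typically host:6789) goes into the comma-separated `monitors` value
$ ceph mon dump
$ grep mon_host /etc/ceph/ceph.conf
```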

examples/rbd/exec-bash.sh (new executable file, 15 lines)

#!/bin/bash

CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
    echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
}

while [[ "$(get_pod_status)" != "Running" ]]; do
    sleep 1
    echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash

examples/rbd/logs.sh (new executable file, 15 lines)

#!/bin/bash

CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
    echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
}

while [[ "$(get_pod_status)" != "Running" ]]; do
    sleep 1
    echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl logs -f $POD_NAME -c $CONTAINER_NAME

examples/rbd/plugin-deploy.sh (new executable file, 15 lines)

#!/bin/bash

deployment_base="${1}"

if [[ -z $deployment_base ]]; then
    deployment_base="../../deploy/rbd/kubernetes"
fi

cd "$deployment_base" || exit 1

objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin)

for obj in ${objects[@]}; do
    kubectl create -f "./$obj.yaml"
done

examples/rbd/plugin-teardown.sh (new executable file, 15 lines)

#!/bin/bash

deployment_base="${1}"

if [[ -z $deployment_base ]]; then
    deployment_base="../../deploy/rbd/kubernetes"
fi

cd "$deployment_base" || exit 1

objects=(csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)

for obj in ${objects[@]}; do
    kubectl delete -f "./$obj.yaml"
done

examples/rbd/pod.yaml (new file, 17 lines)

apiVersion: v1
kind: Pod
metadata:
  name: csirbd-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false

examples/rbd/pvc.yaml (new file, 11 lines)

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd

examples/rbd/secret.yaml (new file, 8 lines)

apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
data:
  # The key name corresponds to a user name defined in the Ceph cluster
  admin: BASE64-ENCODED-PASSWORD
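
Note that in this secret the data key itself names the Ceph user, and its value is that user's base64-encoded key. A sketch equivalent to the manifest above, assuming the `admin` user:

```bash
kubectl create secret generic csi-rbd-secret --namespace=default \
  --from-literal=admin="$(ceph auth get-key client.admin)"
```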

examples/rbd/storageclass.yaml (new file, 24 lines)

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd
provisioner: csi-rbdplugin
parameters:
  # Comma-separated list of Ceph monitors
  monitors: mon1:port,mon2:port,...

  # Ceph pool into which the RBD image shall be created
  pool: rbd

  # RBD image format. Defaults to "2".
  imageFormat: "2"

  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only the `layering` feature.
  imageFeatures: layering

  # The secrets have to contain Ceph admin credentials.
  csiProvisionerSecretName: csi-rbd-secret
  csiProvisionerSecretNamespace: default
  csiNodePublishSecretName: csi-rbd-secret
  csiNodePublishSecretNamespace: default
reclaimPolicy: Delete
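
Once an RBD-backed PVC is bound, the dynamically provisioned image should be visible in the configured pool. A quick sketch, assuming the `rbd` pool from the example above:

```bash
$ kubectl get pvc rbd-pvc
# on a node with Ceph client access
$ rbd ls rbd
```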