added examples

This commit is contained in:
gman
2018-07-18 16:49:15 +02:00
parent e2910f1c18
commit e8ea0aa713
18 changed files with 298 additions and 0 deletions

View File

@@ -0,0 +1,27 @@
# Demo Deployment: a single nginx replica serving content from a
# CephFS-backed PVC (csi-cephfs-pvc) mounted at /var/lib/www/html.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: csicephfs-demo-depl
  labels:
    app: web-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-server
  template:
    metadata:
      labels:
        app: web-server
    spec:
      containers:
        - name: web-server
          image: nginx
          volumeMounts:
            - name: mypvc
              mountPath: /var/lib/www/html
      volumes:
        - name: mypvc
          persistentVolumeClaim:
            claimName: csi-cephfs-pvc
            readOnly: false

15
examples/cephfs/exec-bash.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Wait for the first csi-cephfsplugin pod to reach the Running phase,
# then open an interactive bash shell in its plugin container.
set -euo pipefail

CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app="$CONTAINER_NAME" -o=name | head -n 1)

# Print the current phase (e.g. Pending, Running) of $POD_NAME.
get_pod_status() {
  kubectl get "$POD_NAME" -o jsonpath='{.status.phase}'
}

while [[ "$(get_pod_status)" != "Running" ]]; do
  sleep 1
  echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

# Strip the "pod/" resource prefix for kubectl exec.
kubectl exec -it "${POD_NAME#*/}" -c "$CONTAINER_NAME" bash

15
examples/cephfs/logs.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Wait for the first csi-cephfsplugin pod to reach the Running phase,
# then stream (follow) the logs of its plugin container.
set -euo pipefail

CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app="$CONTAINER_NAME" -o=name | head -n 1)

# Print the current phase (e.g. Pending, Running) of $POD_NAME.
get_pod_status() {
  kubectl get "$POD_NAME" -o jsonpath='{.status.phase}'
}

while [[ "$(get_pod_status)" != "Running" ]]; do
  sleep 1
  echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl logs -f "$POD_NAME" -c "$CONTAINER_NAME"

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Create the CephFS CSI plugin objects with kubectl.
# Usage: plugin-deploy.sh [deployment_base_dir]
# Defaults to the in-tree deploy/cephfs/kubernetes directory.
set -euo pipefail

deployment_base="${1:-}"

if [[ -z "$deployment_base" ]]; then
  deployment_base="../../deploy/cephfs/kubernetes"
fi

cd "$deployment_base" || exit 1

# RBAC objects are created first so the plugin pods start with the
# permissions they need.
objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin)

for obj in "${objects[@]}"; do
  kubectl create -f "./$obj.yaml"
done

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Delete the CephFS CSI plugin objects with kubectl.
# Usage: plugin-teardown.sh [deployment_base_dir]
# Defaults to the in-tree deploy/cephfs/kubernetes directory.
set -euo pipefail

deployment_base="${1:-}"

if [[ -z "$deployment_base" ]]; then
  deployment_base="../../deploy/cephfs/kubernetes"
fi

cd "$deployment_base" || exit 1

# Workloads are deleted before their RBAC objects (reverse of deploy order).
objects=(csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)

for obj in "${objects[@]}"; do
  kubectl delete -f "./$obj.yaml"
done

17
examples/cephfs/pod.yaml Normal file
View File

@@ -0,0 +1,17 @@
# Demo Pod: nginx with the CephFS-backed PVC (csi-cephfs-pvc)
# mounted at /var/lib/www.
apiVersion: v1
kind: Pod
metadata:
  name: csicephfs-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false

11
examples/cephfs/pvc.yaml Normal file
View File

@@ -0,0 +1,11 @@
# 5Gi ReadWriteMany claim provisioned by the csi-cephfs StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: csi-cephfs

View File

@@ -0,0 +1,13 @@
# Ceph credentials consumed by the csi-cephfs StorageClass.
# All values under `data` must be base64-encoded.
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: default
data:
  # Required if provisionVolume is set to false
  userID: BASE64-ENCODED-VALUE
  userKey: BASE64-ENCODED-VALUE
  # Required if provisionVolume is set to true
  adminID: BASE64-ENCODED-VALUE
  adminKey: BASE64-ENCODED-VALUE

View File

@@ -0,0 +1,33 @@
# StorageClass for dynamically provisioning CephFS volumes via the
# csi-cephfsplugin driver. Credentials come from csi-cephfs-secret.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs
provisioner: csi-cephfsplugin
parameters:
  # Comma separated list of Ceph monitors
  monitors: mon1:port,mon2:port,...

  # If set to true, a new volume will be created along with a RADOS user - this requires admin access.
  # If set to false, it is assumed the volume already exists and the user is expected to provide
  # a rootPath to a cephfs volume and user credentials.
  provisionVolume: "true"

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: cephfs_data

  # Root path of an existing CephFS volume
  # Required for provisionVolume: "false"
  # rootPath: /absolute/path

  # The secrets have to contain user and/or Ceph admin credentials.
  csiProvisionerSecretName: csi-cephfs-secret
  csiProvisionerSecretNamespace: default
  csiNodeStageSecretName: csi-cephfs-secret
  csiNodeStageSecretNamespace: default

  # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
  # If left out, default volume mounter will be used - this is determined by probing for ceph-fuse
  # or by setting the default mounter explicitly via --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete