deployment update for 0.3.0

This commit is contained in:
gman 2018-07-18 16:48:43 +02:00
parent 2fa82b47ea
commit e2910f1c18
32 changed files with 424 additions and 616 deletions

View File

@@ -15,10 +15,10 @@
 .PHONY: all rbdplugin
 RBD_IMAGE_NAME=quay.io/cephcsi/rbdplugin
-RBD_IMAGE_VERSION=v0.2.0
+RBD_IMAGE_VERSION=v0.3.0
 CEPHFS_IMAGE_NAME=quay.io/cephcsi/cephfsplugin
-CEPHFS_IMAGE_VERSION=v0.2.0
+CEPHFS_IMAGE_VERSION=v0.3.0
 all: rbdplugin cephfsplugin
@@ -30,7 +30,7 @@ rbdplugin:
 if [ ! -d ./vendor ]; then dep ensure; fi
 CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./rbd
-rbdplugin-container: rbdplugin
+image-rbdplugin: rbdplugin
 cp _output/rbdplugin deploy/rbd/docker
 docker build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker
@@ -38,14 +38,14 @@ cephfsplugin:
 if [ ! -d ./vendor ]; then dep ensure; fi
 CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cephfs
-cephfsplugin-container: cephfsplugin
+image-cephfsplugin: cephfsplugin
 cp _output/cephfsplugin deploy/cephfs/docker
 docker build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker
-push-rbdplugin-container: rbdplugin-container
+push-image-rbdplugin: image-rbdplugin
 docker push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)
-push-cephfsplugin-container: cephfsplugin-container
+push-image-cephfsplugin: image-cephfsplugin
 docker push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)
 clean:
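For reference, the renamed targets are used the same way as before; a minimal sketch of the v0.3.0 build-and-push flow, assuming docker, dep, and quay.io credentials are available locally:

# build the plugin binaries and their v0.3.0 container images
make image-rbdplugin image-cephfsplugin
# push the images to quay.io (requires a prior docker login)
make push-image-rbdplugin push-image-cephfsplugin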

View File

@@ -2,5 +2,5 @@
 if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
 docker login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
-make push-rbdplugin-container push-cephfsplugin-container
+make push-image-rbdplugin push-image-cephfsplugin
 fi

View File

@@ -1,28 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-cephfs
provisioner: csi-cephfsplugin
parameters:
monitors: mon1:port,mon2:port
# If set to true, a new volume will be created along with a RADOS user - this requires admin access.
# If set to false, it is assumed the volume already exists and the user is expected to provide
# a rootPath to a cephfs volume and user credentials.
provisionVolume: "true"
# Required if provisionVolume is set to false
# rootPath: /path-in-cephfs
# Required if provisionVolume is set to true
# pool: cephfs_data
# The secret has to contain user and/or admin credentials.
csiProvisionerSecretName: csi-cephfs-secret
csiProvisionerSecretNameSpace: default
# (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
# If left out, default volume mounter will be used - this is determined by probing for ceph-fuse
# or by setting the default mounter explicitly via --volumemounter command-line argument.
# mounter: kernel
reclaimPolicy: Delete

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,87 +0,0 @@
# This YAML file contains RBAC API objects,
# which are necessary to run external csi attacher for cinder.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: csi-attacher
labels:
app: csi-attacher
spec:
selector:
app: csi-attacher
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-attacher
spec:
serviceName: "csi-attacher"
replicas: 1
template:
metadata:
labels:
app: csi-attacher
spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.2.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate

View File

@@ -0,0 +1,45 @@
kind: Service
apiVersion: v1
metadata:
name: csi-cephfsplugin-attacher
labels:
app: csi-cephfsplugin-attacher
spec:
selector:
app: csi-cephfsplugin-attacher
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-cephfsplugin-attacher
spec:
serviceName: "csi-cephfsplugin-attacher"
replicas: 1
template:
metadata:
labels:
app: csi-cephfsplugin-attacher
spec:
serviceAccount: csi-attacher
containers:
- name: csi-cephfsplugin-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate
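A quick way to confirm the attacher came up and is talking to the plugin socket (a sketch, assuming kubectl points at the target cluster and the default namespace is used):

kubectl get statefulset csi-cephfsplugin-attacher
kubectl logs csi-cephfsplugin-attacher-0 -c csi-cephfsplugin-attacher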

View File

@@ -0,0 +1,46 @@
kind: Service
apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
labels:
app: csi-cephfsplugin-provisioner
spec:
selector:
app: csi-cephfsplugin-provisioner
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-cephfsplugin-provisioner
spec:
serviceName: "csi-cephfsplugin-provisioner"
replicas: 1
template:
metadata:
labels:
app: csi-cephfsplugin-provisioner
spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.3.0
args:
- "--provisioner=csi-cephfsplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate

View File

@@ -1,46 +1,3 @@
# This YAML defines all API objects to create RBAC roles for csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cephfsplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-cephfsplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-cephfsplugin
subjects:
- kind: ServiceAccount
name: csi-cephfsplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-cephfsplugin
apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run csi nodeplugin for cephfs.
 kind: DaemonSet
 apiVersion: apps/v1beta2
 metadata:
@@ -54,11 +11,11 @@ spec:
 labels:
 app: csi-cephfsplugin
 spec:
-serviceAccount: csi-cephfsplugin
+serviceAccount: csi-nodeplugin
 hostNetwork: true
 containers:
 - name: driver-registrar
-image: quay.io/k8scsi/driver-registrar:v0.2.0
+image: quay.io/k8scsi/driver-registrar:v0.3.0
 args:
 - "--v=5"
 - "--csi-address=$(ADDRESS)"
@@ -78,7 +35,7 @@ spec:
 capabilities:
 add: ["SYS_ADMIN"]
 allowPrivilegeEscalation: true
-image: quay.io/cephcsi/cephfsplugin:v0.2.0
+image: quay.io/cephcsi/cephfsplugin:v0.3.0
 args :
 - "--nodeid=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
subjects:
- kind: ServiceAccount
name: csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
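The permissions granted above can be spot-checked with kubectl auth can-i; a sketch, assuming the service account lives in the default namespace:

kubectl auth can-i list volumeattachments.storage.k8s.io --as=system:serviceaccount:default:csi-nodeplugin
kubectl auth can-i create persistentvolumes --as=system:serviceaccount:default:csi-nodeplugin   # expected: no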

View File

@@ -0,0 +1,40 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
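With the manifests now split per component, deployment is plain kubectl rather than the helper scripts removed below. A minimal sketch, using hypothetical file names that mirror the objects above (the actual file names in the repository may differ):

kubectl create -f csi-attacher-rbac.yaml -f csi-provisioner-rbac.yaml -f csi-nodeplugin-rbac.yaml
kubectl create -f csi-cephfsplugin-attacher.yaml -f csi-cephfsplugin-provisioner.yaml
kubectl create -f csi-cephfsplugin.yaml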

View File

@@ -1,97 +0,0 @@
# This YAML file contains all API objects that are necessary to run external
# CSI provisioner.
#
# In production, this needs to be in separate files, e.g. service account and
# role and role binding needs to be created once, while stateful set may
# require some tuning.
#
# In addition, mock CSI driver is hardcoded as the CSI driver.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: csi-provisioner
labels:
app: csi-provisioner
spec:
selector:
app: csi-provisioner
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-provisioner
spec:
serviceName: "csi-provisioner"
replicas: 1
template:
metadata:
labels:
app: csi-provisioner
spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.2.1
args:
- "--provisioner=csi-cephfsplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate

View File

@@ -1,7 +0,0 @@
#!/bin/bash
objects=(cephfs-storage-class cephfsplugin csi-attacher csi-provisioner)
for obj in ${objects[@]}; do
kubectl create -f "./$obj.yaml"
done

View File

@@ -1,4 +0,0 @@
#!/bin/sh
kubectl create -f ./pvc.yaml
kubectl create -f ./pod.yaml

View File

@@ -1,3 +0,0 @@
#!/bin/sh
kubectl exec -it $(kubectl get pods -l app=csi-cephfsplugin -o=name | head -n 1 | cut -f2 -d"/") -c csi-cephfsplugin bash

View File

@@ -1,3 +0,0 @@
#!/bin/sh
kubectl logs $(kubectl get pods -l app=csi-cephfsplugin -o=name | head -n 1) -c csi-cephfsplugin

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: web-server
spec:
containers:
- name: web-server
image: nginx
volumeMounts:
- mountPath: /var/lib/www/html
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: csi-cephfs-pvc
readOnly: false

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: csi-cephfs-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi
storageClassName: csi-cephfs

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: csi-cephfs-secret
namespace: default
data:
# Required if provisionVolume is set to false
userID: userID-encoded-by-base64
userKey: userKey-encoded-by-base64
# Required if provisionVolume is set to true
adminID: adminID-encoded-by-base64
adminKey: adminKey-encoded-by-base64
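The values in this (now removed) example secret are base64-encoded strings; a sketch of producing them, with placeholder credentials:

echo -n 'admin' | base64                # adminID
echo -n '<ceph-admin-key>' | base64     # adminKey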

View File

@@ -1,7 +0,0 @@
#!/bin/bash
objects=(cephfsplugin csi-provisioner csi-attacher cephfs-storage-class)
for obj in ${objects[@]}; do
kubectl delete -f "./$obj.yaml"
done

View File

@@ -1,4 +0,0 @@
#!/bin/sh
kubectl delete -f ./pod.yaml
kubectl delete -f ./pvc.yaml

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,87 +0,0 @@
# This YAML file contains RBAC API objects,
# which are necessary to run external csi attacher for cinder.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: csi-attacher
labels:
app: csi-attacher
spec:
selector:
app: csi-attacher
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-attacher
spec:
serviceName: "csi-attacher"
replicas: 1
template:
metadata:
labels:
app: csi-attacher
spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.2.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
subjects:
- kind: ServiceAccount
name: csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-nodeplugin
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,40 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,97 +0,0 @@
# This YAML file contains all API objects that are necessary to run external
# CSI provisioner.
#
# In production, this needs to be in separate files, e.g. service account and
# role and role binding needs to be created once, while stateful set may
# require some tuning.
#
# In addition, mock CSI driver is hardcoded as the CSI driver.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: csi-provisioner
labels:
app: csi-provisioner
spec:
selector:
app: csi-provisioner
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-provisioner
spec:
serviceName: "csi-provisioner"
replicas: 1
template:
metadata:
labels:
app: csi-provisioner
spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.2.0
args:
- "--provisioner=csi-rbdplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate

View File

@@ -0,0 +1,45 @@
kind: Service
apiVersion: v1
metadata:
name: csi-rbdplugin-attacher
labels:
app: csi-rbdplugin-attacher
spec:
selector:
app: csi-rbdplugin-attacher
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-rbdplugin-attacher
spec:
serviceName: "csi-rbdplugin-attacher"
replicas: 1
template:
metadata:
labels:
app: csi-rbdplugin-attacher
spec:
serviceAccount: csi-attacher
containers:
- name: csi-rbdplugin-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate

View File

@@ -0,0 +1,46 @@
kind: Service
apiVersion: v1
metadata:
name: csi-rbdplugin-provisioner
labels:
app: csi-rbdplugin-provisioner
spec:
selector:
app: csi-rbdplugin-provisioner
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-rbdplugin-provisioner
spec:
serviceName: "csi-rbdplugin-provisioner"
replicas: 1
template:
metadata:
labels:
app: csi-rbdplugin-provisioner
spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.3.0
args:
- "--provisioner=csi-rbdplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate
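As on the CephFS side, a quick sanity check that the RBD sidecars are up (a sketch, assuming the default namespace):

kubectl get statefulset csi-rbdplugin-attacher csi-rbdplugin-provisioner
kubectl logs csi-rbdplugin-provisioner-0 -c csi-provisioner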

View File

@@ -1,46 +1,3 @@
# This YAML defines all API objects to create RBAC roles for csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-rbdplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-rbdplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-rbdplugin
subjects:
- kind: ServiceAccount
name: csi-rbdplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-rbdplugin
apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run csi nodeplugin for rbd.
 kind: DaemonSet
 apiVersion: apps/v1beta2
 metadata:
@@ -54,11 +11,11 @@ spec:
 labels:
 app: csi-rbdplugin
 spec:
-serviceAccount: csi-rbdplugin
+serviceAccount: csi-nodeplugin
 hostNetwork: true
 containers:
 - name: driver-registrar
-image: quay.io/k8scsi/driver-registrar:v0.2.0
+image: quay.io/k8scsi/driver-registrar:v0.3.0
 args:
 - "--v=5"
 - "--csi-address=$(ADDRESS)"
@@ -78,7 +35,7 @@ spec:
 capabilities:
 add: ["SYS_ADMIN"]
 allowPrivilegeEscalation: true
-image: quay.io/cephcsi/rbdplugin:v0.2.0
+image: quay.io/cephcsi/rbdplugin:v0.3.0
 args :
 - "--nodeid=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: csi-rbd

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: csi-ceph-secret
namespace: default
data:
#Please note this value is base64 encoded.
# Key value corresponds to a user name defined in ceph cluster
admin: QVFDZUhPMVpJTFBQRFJBQTd6dzNkNzZicGxrdlR3em9vc3lidkE9PQo=
kubernetes: QVFDZDR1MVoxSDI0QnhBQWFxdmZIRnFuMSs0RFZlK1pRZ0ZmUEE9PQo=

View File

@@ -1,13 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-rbd
provisioner: csi-rbdplugin
parameters:
monitors: 192.168.80.233:6789
pool: kubernetes
csiProvisionerSecretName: csi-ceph-secret
csiProvisionerSecretNamespace: default
imageFormat: "2"
imageFeatures: layering
reclaimPolicy: Delete

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: web-server
spec:
containers:
- name: web-server
image: nginx
volumeMounts:
- mountPath: /var/lib/www/html
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: rbd-pvc
readOnly: false