refact: Remove Kubernetes 1.13.x support

Signed-off-by: wilmardo <info@wilmardenouden.nl>
wilmardo 2019-12-19 16:35:58 +01:00 committed by mergify[bot]
parent 70c709155c
commit f04af5742d
46 changed files with 41 additions and 1570 deletions

@@ -58,12 +58,11 @@ jobs:
- make go-test
- make dep-check || travis_terminate 1;
- name: cephcsi with kube 1.13.7
env: K8S_FEATURE_GATES="BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true"
- name: cephcsi with kube 1.14.10
script:
- scripts/skip-doc-change.sh || travis_terminate 0;
- make image-cephcsi || travis_terminate 1;
- scripts/travis-functest.sh v1.13.7 || travis_terminate 1;
- scripts/travis-functest.sh v1.14.10 || travis_terminate 1;
- name: cephcsi with kube 1.15.6
script:

@@ -61,16 +61,16 @@ NOTE:
| Plugin | Features | Feature Status | CSI Driver Version | CSI Spec Version | Ceph Cluster Version | Kubernetes Version |
| ------ | --------------------------------------------------------- | -------------- | ------------------ | ---------------- | -------------------- | ------------------ |
| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.13.0 |
| | Dynamically provision, de-provision Block mode RWX volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.13.0 |
| | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.13.0 |
| | Creating and deleting snapshot | Alpha | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.13.0 |
| | Provision volume from snapshot | Alpha | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.13.0 |
| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.14.0 |
| | Dynamically provision, de-provision Block mode RWX volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.14.0 |
| | Creating and deleting snapshot | Alpha | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.14.0 |
| | Provision volume from snapshot | Alpha | >= v1.0.0 | >= v1.0.0 | Mimic (>=v13.0.0) | >= v1.14.0 |
| | Provision volume from another volume | - | - | - | - | - |
| | Resize volume | Beta | >= v2.0.0 | >= v1.1.0 | Mimic (>=v13.0.0) | >= v1.15.0 |
| | Metrics Support | Beta | >= v1.2.0 | >= v1.1.0 | Mimic (>=v13.0.0) | >= v1.15.0 |
| CephFS | Dynamically provision, de-provision File mode RWO volume | Alpha | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.13.0 |
| | Dynamically provision, de-provision File mode RWX volume | Alpha | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.13.0 |
| CephFS | Dynamically provision, de-provision File mode RWO volume | Alpha | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWX volume | Alpha | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Creating and deleting snapshot | - | - | - | - | - |
| | Provision volume from snapshot | - | - | - | - | - |
| | Provision volume from another volume | - | - | - | - | - |

@@ -1,4 +1,3 @@
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion -}}
kind: Deployment
apiVersion: apps/v1
metadata:
@@ -195,4 +194,3 @@ spec:
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}
{{- end -}}
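
With Kubernetes 1.13 out of the picture, this template no longer gates the provisioner on `semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion` and always renders a `Deployment`. The chart's output for a given cluster version can be checked offline; a minimal sketch, assuming Helm 2 and the in-repo chart path (release name is a placeholder):

```bash
# Render the CephFS chart against a simulated 1.14 cluster and list the
# object kinds it produces (chart path and release name are placeholders).
helm template charts/ceph-csi-cephfs --name my-release \
  --kube-version 1.14.10 | grep '^kind:' | sort -u
```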

@@ -17,9 +17,7 @@ rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end -}}
{{- end -}}
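
The `coordination.k8s.io` rule is now granted unconditionally: on 1.14+ clusters the provisioner sidecar's leader election can be backed by Lease objects rather than annotated Endpoints, which is what this rule permits. Once the provisioner is running, the election is easy to observe; the namespace below is an assumption:

```bash
# List leader-election Lease objects created by the CSI sidecars
# (deployment namespace assumed to be "default").
kubectl -n default get leases.coordination.k8s.io
```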

@@ -1,27 +0,0 @@
{{- if semverCompare "<=1.13" .Capabilities.KubeVersion.GitVersion -}}
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
{{- end -}}

@@ -1,176 +0,0 @@
{{- if semverCompare "<=1.13" .Capabilities.KubeVersion.GitVersion -}}
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceName: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
replicas: 1
selector:
matchLabels:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
contains: liveness
spec:
serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
containers:
- name: csi-provisioner
image: "{{ .Values.provisioner.provisioner.image.repository }}:{{ .Values.provisioner.provisioner.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.provisioner.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.provisioner.resources | indent 12 }}
{{- if .Values.provisioner.attacher.enabled }}
- name: csi-attacher
image: "{{ .Values.provisioner.attacher.image.repository }}:{{ .Values.provisioner.attacher.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.attacher.image.pullPolicy }}
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.attacher.resources | indent 12 }}
{{- end }}
- name: csi-cephfsplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--controllerserver=true"
- "--pidlimit=-1"
{{- if .Values.provisioner.grpcMetrics.enabled }}
- "--metricsport={{ .Values.nodeplugin.grpcMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=true"
{{- end }}
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=$(DRIVER_NAME)"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.provisioner.httpMetrics.enabled }}
- name: liveness-prometheus
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.provisioner.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.tolerations }}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}
{{- end -}}

@@ -116,8 +116,6 @@ nodeplugin:
provisioner:
name: provisioner
# When using Kubernetes <1.14 this value is ignored
# The statefulset is deployed with replicas: 1
replicaCount: 3
# Timeout for waiting for creation or deletion of a volume
timeout: 60s
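
With the StatefulSet fallback gone, `replicaCount` always controls the provisioner Deployment. A hedged install-time override, again assuming Helm 2 and the in-repo chart path:

```bash
# Install the chart with three provisioner replicas
# (release name and chart path are placeholders).
helm install charts/ceph-csi-cephfs --name my-release \
  --set provisioner.replicaCount=3
```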

@@ -1,4 +1,3 @@
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion -}}
kind: Deployment
apiVersion: apps/v1
metadata:
@@ -210,4 +209,3 @@ spec:
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}
{{- end -}}

@@ -14,9 +14,7 @@ rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end -}}
{{- end -}}

@@ -51,19 +51,13 @@ rules:
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion }}
verbs: ["create", "list", "watch", "delete", "get", "update"]
{{- else }}
verbs: ["create"]
{{- end -}}
{{- if semverCompare ">=1.14" .Capabilities.KubeVersion.GitVersion }}
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
{{- if .Values.provisioner.resizer.enabled }}
{{- if .Values.provisioner.resizer.enabled }}
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
{{- end -}}
{{- end -}}
{{- end -}}
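
Since every supported cluster is now 1.14+, the fuller set of CRD verbs and the `volumesnapshots/status` rule are granted unconditionally. Whether the bound ServiceAccount actually holds a given permission can be spot-checked with `kubectl auth can-i`; the namespace and ServiceAccount name below are assumptions:

```bash
# Verify the provisioner ServiceAccount may update snapshot status
# (namespace and ServiceAccount name are placeholders).
kubectl auth can-i update volumesnapshots --subresource=status \
  --as=system:serviceaccount:default:rbd-csi-provisioner
```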

@@ -1,27 +0,0 @@
{{- if semverCompare "<=1.13" .Capabilities.KubeVersion.GitVersion -}}
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
{{- end -}}

@@ -1,193 +0,0 @@
{{- if semverCompare "<=1.13" .Capabilities.KubeVersion.GitVersion -}}
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceName: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
replicas: 1
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
contains: liveness
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
containers:
- name: csi-provisioner
image: "{{ .Values.provisioner.provisioner.image.repository }}:{{ .Values.provisioner.provisioner.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.provisioner.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.provisioner.resources | indent 12 }}
- name: csi-snapshotter
image: {{ .Values.provisioner.snapshotter.image.repository }}:{{ .Values.provisioner.snapshotter.image.tag }}
imagePullPolicy: {{ .Values.provisioner.snapshotter.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout={{ .Values.provisioner.timeout }}"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.snapshotter.resources | indent 12 }}
{{- if .Values.provisioner.attacher.enabled }}
- name: csi-attacher
image: "{{ .Values.provisioner.attacher.image.repository }}:{{ .Values.provisioner.attacher.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.attacher.image.pullPolicy }}
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.attacher.resources | indent 12 }}
{{- end }}
- name: csi-rbdplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--controllerserver=true"
- "--pidlimit=-1"
{{- if .Values.provisioner.grpcMetrics.enabled }}
- "--metricsport={{ .Values.nodeplugin.grpcMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=true"
{{- end }}
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=$(DRIVER_NAME)"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.provisioner.httpMetrics.enabled }}
- name: liveness-prometheus
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.provisioner.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.tolerations }}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}
{{- end -}}

@@ -116,8 +116,6 @@ nodeplugin:
provisioner:
name: provisioner
# When using Kubernetes <1.14 this value is ignored
# The statefulset is deployed with replicas: 1
replicaCount: 3
# Timeout for waiting for creation or deletion of a volume
timeout: 60s

@@ -1,154 +0,0 @@
---
kind: Service
apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
labels:
app: csi-metrics
spec:
selector:
app: csi-cephfsplugin-provisioner
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin-provisioner
spec:
selector:
matchLabels:
app: csi-cephfsplugin-provisioner
serviceName: "csi-cephfsplugin-provisioner"
replicas: 1
template:
metadata:
labels:
app: csi-cephfsplugin-provisioner
spec:
serviceAccount: cephfs-csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.4.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout=150s"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-cephfsplugin-attacher
image: quay.io/k8scsi/csi-attacher:v1.2.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--pidlimit=-1"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: liveness-prometheus
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

@@ -1,185 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin
spec:
selector:
matchLabels:
app: csi-cephfsplugin
template:
metadata:
labels:
app: csi-cephfsplugin
spec:
serviceAccount: cephfs-csi-nodeplugin
hostNetwork: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
securityContext:
privileged: true
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
lifecycle:
preStop:
exec:
command: [
"/bin/sh", "-c",
"rm -rf /registration/cephfs.csi.ceph.com \
/registration/cephfs.csi.ceph.com-reg.sock"
]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--mountcachedir=/mount-cache-dir"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: mount-cache-dir
mountPath: /mount-cache-dir
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: liveness-prometheus
securityContext:
privileged: true
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: mount-cache-dir
emptyDir: {}
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
---
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-cephfsplugin
labels:
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
selector:
app: csi-cephfsplugin

@@ -1,94 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.cephfs.csi.ceph.com/aggregate-to-cephfs-external-provisioner-runner: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner-rules
labels:
rbac.cephfs.csi.ceph.com/aggregate-to-cephfs-external-provisioner-runner: "true"
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: cephfs-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io

@@ -1,53 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.cephfs.csi.ceph.com/aggregate-to-cephfs-csi-nodeplugin: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin-rules
labels:
rbac.cephfs.csi.ceph.com/aggregate-to-cephfs-csi-nodeplugin: "true"
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: cephfs-csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
data:
config.json: |-
[]
metadata:
name: ceph-csi-config

@@ -1,106 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.rbd.csi.ceph.com/aggregate-to-rbd-external-provisioner-runner: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner-rules
labels:
rbac.rbd.csi.ceph.com/aggregate-to-rbd-external-provisioner-runner: "true"
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: rbd-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io

@@ -1,168 +0,0 @@
---
kind: Service
apiVersion: v1
metadata:
name: csi-rbdplugin-provisioner
labels:
app: csi-metrics
spec:
selector:
app: csi-rbdplugin-provisioner
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8680
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-rbdplugin-provisioner
spec:
serviceName: "csi-rbdplugin-provisioner"
replicas: 1
selector:
matchLabels:
app: csi-rbdplugin-provisioner
template:
metadata:
labels:
app: csi-rbdplugin-provisioner
spec:
serviceAccount: rbd-csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.4.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout=150s"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v1.2.2
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout=150s"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v1.2.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--pidlimit=-1"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: liveness-prometheus
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8680"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

@@ -1,175 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-rbdplugin
spec:
selector:
matchLabels:
app: csi-rbdplugin
template:
metadata:
labels:
app: csi-rbdplugin
spec:
serviceAccount: rbd-csi-nodeplugin
hostNetwork: true
hostPID: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
securityContext:
privileged: true
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
lifecycle:
preStop:
exec:
command: [
"/bin/sh", "-c",
"rm -rf /registration/rbd.csi.ceph.com \
/registration/rbd.csi.ceph.com-reg.sock"
]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: liveness-prometheus
securityContext:
privileged: true
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8680"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
---
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-rbdplugin
labels:
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8680
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
selector:
app: csi-rbdplugin

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
data:
config.json: |-
[]
metadata:
name: ceph-csi-config

@@ -1,56 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.rbd.csi.ceph.com/aggregate-to-rbd-csi-nodeplugin: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin-rules
labels:
rbac.rbd.csi.ceph.com/aggregate-to-rbd-csi-nodeplugin: "true"
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rbd-csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io

@@ -6,7 +6,7 @@
- [Upgrading CephFS](#upgrading-cephfs)
- [1. Upgrade CephFS Provisioner resources](#1-upgrade-cephfs-provisioner-resources)
- [1.1 Update the CephFS Provisioner RBAC](#11-update-the-cephfs-provisioner-rbac)
- [1.2 Update the CephFS Provisioner deployment/statefulset](#12-update-the-cephfs-provisioner-deploymentstatefulset)
- [1.2 Update the CephFS Provisioner deployment](#12-update-the-cephfs-provisioner-deployment)
- [2. Upgrade CephFS Nodeplugin resources](#2-upgrade-cephfs-nodeplugin-resources)
- [2.1 Update the CephFS Nodeplugin RBAC](#21-update-the-cephfs-nodeplugin-rbac)
- [2.2 Update the CephFS Nodeplugin daemonset](#22-update-the-cephfs-nodeplugin-daemonset)
@@ -14,7 +14,7 @@
- [Upgrading RBD](#upgrading-rbd)
- [3. Upgrade RBD Provisioner resources](#3-upgrade-rbd-provisioner-resources)
- [3.1 Update the RBD Provisioner RBAC](#31-update-the-rbd-provisioner-rbac)
- [3.2 Update the RBD Provisioner deployment/statefulset](#32-update-the-rbd-provisioner-deploymentstatefulset)
- [3.2 Update the RBD Provisioner deployment](#32-update-the-rbd-provisioner-deployment)
- [4. Upgrade RBD Nodeplugin resources](#4-upgrade-rbd-nodeplugin-resources)
- [4.1 Update the RBD Nodeplugin RBAC](#41-update-the-rbd-nodeplugin-rbac)
- [4.2 Update the RBD Nodeplugin daemonset](#42-update-the-rbd-nodeplugin-daemonset)
@@ -75,9 +75,6 @@ Upgrading cephfs csi includes upgrade of cephfs driver and as well as
kubernetes sidecar containers and also the permissions required for the
kubernetes sidecar containers; let's upgrade them one by one
**Note** If you are using ceph-csi with kubernetes v1.13 use templates from
v1.13 directory
#### 1. Upgrade CephFS Provisioner resources
Upgrading provisioner resources includes updating the provisioner RBAC and
@@ -86,7 +83,7 @@ Provisioner deployment
##### 1.1 Update the CephFS Provisioner RBAC
```bash
[$] kubectl apply -f deploy/cephfs/kubernetes/v1.14+/csi-provisioner-rbac.yaml
[$] kubectl apply -f deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
serviceaccount/cephfs-csi-provisioner configured
clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner configured
clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner-rules configured
@@ -95,10 +92,10 @@ role.rbac.authorization.k8s.io/cephfs-external-provisioner-cfg configured
rolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role-cfg configured
```
##### 1.2 Update the CephFS Provisioner deployment/statefulset
##### 1.2 Update the CephFS Provisioner deployment
```bash
[$]kubectl apply -f deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin-provisioner.yaml
[$]kubectl apply -f deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
service/csi-cephfsplugin-provisioner configured
deployment.apps/csi-cephfsplugin-provisioner configured
```
@@ -121,7 +118,7 @@ nodeplugin daemonset
##### 2.1 Update the CephFS Nodeplugin RBAC
```bash
[$]kubectl apply -f deploy/cephfs/kubernetes/v1.14+/csi-nodeplugin-rbac.yaml
[$]kubectl apply -f deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
serviceaccount/cephfs-csi-nodeplugin configured
clusterrole.rbac.authorization.k8s.io/cephfs-csi-nodeplugin configured
clusterrole.rbac.authorization.k8s.io/cephfs-csi-nodeplugin-rules configured
@@ -134,7 +131,7 @@ application pods from their mounts, continue with this section. Otherwise, you
can skip to step 2.2
```console
vi deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin.yaml
vi deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
```
```yaml
@@ -162,7 +159,7 @@ daemonset spec
##### 2.2 Update the CephFS Nodeplugin daemonset
```bash
[$]kubectl apply -f deploy/cephfs/kubernetes/v1.14+/csi-cephfsplugin.yaml
[$]kubectl apply -f deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
daemonset.apps/csi-cephfsplugin configured
service/csi-metrics-cephfsplugin configured
```
@@ -196,9 +193,6 @@ Upgrading rbd csi includes upgrade of rbd driver and as well as kubernetes
sidecar containers and also the permissions required for the kubernetes sidecar
containers; let's upgrade them one by one
**Note:** If you are using ceph-csi with kubernetes v1.13 use templates from
v1.13 directory
#### 3. Upgrade RBD Provisioner resources
Upgrading provisioner resources includes updating the provisioner RBAC and
@@ -207,7 +201,7 @@ Provisioner deployment
##### 3.1 Update the RBD Provisioner RBAC
```bash
[$]kubectl apply -f deploy/rbd/kubernetes/v1.14+/csi-provisioner-rbac.yaml
[$]kubectl apply -f deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
serviceaccount/rbd-csi-provisioner configured
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner configured
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner-rules configured
@@ -216,10 +210,10 @@ role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg configured
rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg configured
```
##### 3.2 Update the RBD Provisioner deployment/statefulset
##### 3.2 Update the RBD Provisioner deployment
```bash
[$]kubectl apply -f deploy/rbd/kubernetes/v1.14+/csi-rbdplugin-provisioner.yaml
[$]kubectl apply -f deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
service/csi-rbdplugin-provisioner configured
deployment.apps/csi-rbdplugin-provisioner configured
```
@@ -242,7 +236,7 @@ nodeplugin daemonset
##### 4.1 Update the RBD Nodeplugin RBAC
```bash
[$]kubectl apply -f deploy/rbd/kubernetes/v1.14+/csi-nodeplugin-rbac.yaml
[$]kubectl apply -f deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml
serviceaccount/rbd-csi-nodeplugin configured
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin configured
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin-rules configured
@@ -255,7 +249,7 @@ application pods from their mounts, continue with this section. Otherwise, you
can skip to step 4.2
```console
vi deploy/rbd/kubernetes/v1.14+/csi-rbdplugin.yaml
vi deploy/rbd/kubernetes/csi-rbdplugin.yaml
```
```yaml
@@ -283,7 +277,7 @@ daemonset spec
##### 4.2 Update the RBD Nodeplugin daemonset
```bash
[$]kubectl apply -f deploy/rbd/kubernetes/v1.14+/csi-rbdplugin.yaml
[$]kubectl apply -f deploy/rbd/kubernetes/csi-rbdplugin.yaml
daemonset.apps/csi-rbdplugin configured
service/csi-metrics-rbdplugin configured
```

@@ -115,11 +115,9 @@ for a zero-sized volume means no quota attribute will be set.
## Deployment with Kubernetes
Requires Kubernetes 1.13+
Requires Kubernetes 1.14+
if your cluster version is 1.13.x please use [cephfs v1.13
templates](../deploy/cephfs/kubernetes/v1.13) or else use [cephfs v1.14+
templates](../deploy/cephfs/kubernetes/v1.14+)
Use the [cephfs templates](../deploy/cephfs/kubernetes)
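
A minimal way to apply the whole template directory, assuming the command is run from the repository root:

```bash
# Apply all CephFS driver manifests from the single remaining
# template directory.
kubectl apply -f deploy/cephfs/kubernetes/
```
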
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as

@@ -71,11 +71,9 @@ is required for provisioning new RBD images.
## Deployment with Kubernetes
Requires Kubernetes 1.13+
Requires Kubernetes 1.14+
if your cluster version is 1.13.x please use [rbd v1.13
templates](../deploy/rbd/kubernetes/v1.13) or else use [rbd v1.14+
templates](../deploy/rbd/kubernetes/v1.14+)
Use the [rbd templates](../deploy/rbd/kubernetes)
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as

@@ -46,7 +46,7 @@ following environment variables can be exported to customize kubernetes deployme
| ENV | Description | Default |
| ------------------ | ------------------------------------------------ | ------------------------------------------------------------------ |
| MINIKUBE_VERSION | minikube version to install | latest |
| KUBE_VERSION | kubernetes version to install | v1.13.0 |
| KUBE_VERSION | kubernetes version to install | v1.14.10 |
| MEMORY | Amount of RAM allocated to the minikube VM in MB | 3000 |
| VM_DRIVER | VM driver to create virtual machine | virtualbox |
| CEPHCSI_IMAGE_REPO | Repo URL to pull cephcsi images | quay.io/cephcsi |
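
For example, to pin the cluster version and VM driver when bringing up the test cluster (the `up` subcommand is an assumption based on the repo's CI scripts):

```bash
# Bring up a local minikube test cluster on a pinned Kubernetes version.
export KUBE_VERSION=v1.14.10
export VM_DRIVER=virtualbox
./scripts/minikube.sh up
```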

@@ -2,7 +2,6 @@ package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo" // nolint
@@ -18,15 +17,10 @@ var (
cephfsNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
cephfsDeploymentName = "csi-cephfsplugin-provisioner"
cephfsDeamonSetName = "csi-cephfsplugin"
cephfsDirPath = "../deploy/cephfs/kubernetes"
cephfsDirPath = "../deploy/cephfs/kubernetes/"
cephfsExamplePath = "../examples/cephfs/"
)
func updateCephfsDirPath(c clientset.Interface) {
version := getKubeVersionToDeploy(c)
cephfsDirPath = fmt.Sprintf("%s/%s/", cephfsDirPath, version)
}
func deployCephfsPlugin() {
// delete objects deployed by rook
framework.RunKubectlOrDie("delete", "--ignore-not-found=true", "-f", cephfsDirPath+cephfsProvisionerRBAC)
@@ -64,7 +58,6 @@ var _ = Describe("cephfs", func() {
// deploy cephfs CSI
BeforeEach(func() {
c = f.ClientSet
updateCephfsDirPath(f.ClientSet)
createConfigMap(cephfsDirPath, f.ClientSet, f)
deployCephfsPlugin()
createCephfsSecret(f.ClientSet, f)
@@ -88,15 +81,9 @@ var _ = Describe("cephfs", func() {
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
By("checking provisioner statefulset/deployment is running")
timeout := time.Duration(deployTimeout) * time.Minute
By("checking provisioner deployment is running")
var err error
sts := deployProvAsSTS(f.ClientSet)
if sts {
err = waitForStatefulSetReplicasReady(cephfsDeploymentName, namespace, f.ClientSet, 1*time.Second, timeout)
} else {
err = waitForDeploymentComplete(cephfsDeploymentName, namespace, f.ClientSet, deployTimeout)
}
err = waitForDeploymentComplete(cephfsDeploymentName, namespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}

@@ -2,7 +2,6 @@ package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo" // nolint
@@ -17,18 +16,13 @@ var (
rbdNodePlugin = "csi-rbdplugin.yaml"
rbdNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
configMap = "csi-config-map.yaml"
rbdDirPath = "../deploy/rbd/kubernetes"
rbdDirPath = "../deploy/rbd/kubernetes/"
rbdExamplePath = "../examples/rbd/"
rbdDeploymentName = "csi-rbdplugin-provisioner"
rbdDaemonsetName = "csi-rbdplugin"
namespace = "default"
)
func updaterbdDirPath(c clientset.Interface) {
version := getKubeVersionToDeploy(c)
rbdDirPath = fmt.Sprintf("%s/%s/", rbdDirPath, version)
}
func deployRBDPlugin() {
// delete objects deployed by rook
framework.RunKubectlOrDie("delete", "--ignore-not-found=true", "-f", rbdDirPath+rbdProvisionerRBAC)
@@ -66,7 +60,6 @@ var _ = Describe("RBD", func() {
// deploy RBD CSI
BeforeEach(func() {
c = f.ClientSet
updaterbdDirPath(f.ClientSet)
createConfigMap(rbdDirPath, f.ClientSet, f)
deployRBDPlugin()
createRBDStorageClass(f.ClientSet, f, make(map[string]string))
@@ -98,15 +91,9 @@ var _ = Describe("RBD", func() {
// appClonePath := rbdExamplePath + "pod-restore.yaml"
// snapshotPath := rbdExamplePath + "snapshot.yaml"
By("checking provisioner statefulset/deployment is running")
timeout := time.Duration(deployTimeout) * time.Minute
By("checking provisioner deployment is running")
var err error
sts := deployProvAsSTS(f.ClientSet)
if sts {
err = waitForStatefulSetReplicasReady(rbdDeploymentName, namespace, f.ClientSet, 1*time.Second, timeout)
} else {
err = waitForDeploymentComplete(rbdDeploymentName, namespace, f.ClientSet, deployTimeout)
}
err = waitForDeploymentComplete(rbdDeploymentName, namespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}

@@ -9,8 +9,6 @@ import (
"strings"
"time"
"k8s.io/klog"
// _ "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" // nolint
// _ "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1" // nolint
. "github.com/onsi/ginkgo" // nolint
@@ -45,31 +43,6 @@ var poll = 2 * time.Second
// Timestamp string `json:"timestamp"`
// }
func deployProvAsSTS(c clientset.Interface) bool {
// kubeMinor to use deployment instead of statefulset for provisioner
const kubeMinor = "14"
v, err := c.Discovery().ServerVersion()
if err != nil {
klog.Errorf("failed to get server version with error %v", err)
return false
}
if v.Minor < kubeMinor {
return true
}
return false
}
func getKubeVersionToDeploy(c clientset.Interface) string {
sts := deployProvAsSTS(c)
version := ""
if sts {
version = "v1.13"
} else {
version = "v1.14+"
}
return version
}
func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
timeout := time.Duration(t) * time.Minute
start := time.Now()
@@ -135,23 +108,6 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er
return nil
}
func waitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, poll, err)
continue
}
if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
framework.Logf("All %d replicas of StatefulSet %s are ready. (%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start))
return nil
}
framework.Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
}
return fmt.Errorf("statefulSet %s still has unready pods within %v", statefulSetName, timeout)
}
func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) {
cmd := []string{"/bin/sh", "-c", c}
podList, err := f.PodClientNS(ns).List(*opt)

@@ -1,4 +1,4 @@
# How to test RBD and CephFS plugins with Kubernetes 1.13
# How to test RBD and CephFS plugins with Kubernetes 1.14+
## Deploying Ceph-CSI services

@@ -3,7 +3,7 @@
deployment_base="${1}"
if [[ -z $deployment_base ]]; then
deployment_base="../../deploy/cephfs/kubernetes/v1.14+"
deployment_base="../../deploy/cephfs/kubernetes"
fi
cd "$deployment_base" || exit 1

@@ -3,7 +3,7 @@
deployment_base="${1}"
if [[ -z $deployment_base ]]; then
deployment_base="../../deploy/cephfs/kubernetes/v1.14+"
deployment_base="../../deploy/cephfs/kubernetes"
fi
cd "$deployment_base" || exit 1

@@ -3,7 +3,7 @@
deployment_base="${1}"
if [[ -z $deployment_base ]]; then
deployment_base="../../deploy/rbd/kubernetes/v1.14+"
deployment_base="../../deploy/rbd/kubernetes"
fi
cd "$deployment_base" || exit 1

@@ -3,7 +3,7 @@
deployment_base="${1}"
if [[ -z $deployment_base ]]; then
deployment_base="../../deploy/rbd/kubernetes/v1.14+"
deployment_base="../../deploy/rbd/kubernetes"
fi
cd "$deployment_base" || exit 1

@@ -56,7 +56,7 @@ function install_kubectl() {
# configure minikube
MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"}
MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"}
KUBE_VERSION=${KUBE_VERSION:-"v1.14.2"}
KUBE_VERSION=${KUBE_VERSION:-"v1.14.10"}
MEMORY=${MEMORY:-"3000"}
VM_DRIVER=${VM_DRIVER:-"virtualbox"}
#configure image repo