From 2d560ba0872569a46f0d890491c8aee702f2e5b6 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Fri, 24 May 2019 16:33:33 +0530
Subject: [PATCH] update ceph-csi to build and use a single docker image

Currently we have 3 Dockerfiles (cephcsi, rbd, cephfs) in the ceph-csi
repo. A [commit](https://github.com/ceph/ceph-csi/commit/85e121ebfe3cf2ee94bf7a8d37ad793ecb43c455)
added by John builds a single image which can act as rbd or cephfs
based on the input configuration. This PR updates the Makefile and the
Kubernetes templates to use the unified image, and also deletes the
other two Dockerfiles.
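For illustration, a rough sketch of how the unified image is meant to be
invoked; the image name and flags come from the templates and docs below,
while the node id and socket path are placeholders:

```bash
# One image, two personalities: --type selects the driver at start-up.
docker run quay.io/cephcsi/cephcsi:canary \
    --type=rbd \
    --nodeid=node-1 \
    --endpoint=unix://tmp/csi.sock \
    --drivername=rbd.csi.ceph.com

# Passing --type=cephfs makes the same image act as the CephFS plugin.
```

In the manifests below the kubelet and the CSI sidecars supply these
values; running the image by hand like this is only a smoke test.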
Signed-off-by: Madhu Rajanna
---
 .gitignore                                     |  8 +---
 Makefile                                       | 37 ++++------------
 cmd/cephcsi.go                                 |  2 +-
 deploy.sh                                      |  5 +--
 deploy/cephcsi/image/Dockerfile                |  5 +--
 deploy/cephfs/docker/Dockerfile                | 15 -------
 .../helm/templates/nodeplugin-daemonset.yaml   |  7 +--
 .../templates/provisioner-statefulset.yaml     |  1 +
 deploy/cephfs/helm/values.yaml                 |  5 ++-
 .../csi-cephfsplugin-provisioner.yaml          |  4 +-
 .../cephfs/kubernetes/csi-cephfsplugin.yaml    |  4 +-
 deploy/rbd/docker/Dockerfile                   | 12 ------
 .../helm/templates/nodeplugin-daemonset.yaml   |  1 +
 .../templates/provisioner-statefulset.yaml     |  1 +
 deploy/rbd/helm/values.yaml                    |  5 ++-
 .../kubernetes/csi-rbdplugin-provisioner.yaml  |  4 +-
 deploy/rbd/kubernetes/csi-rbdplugin.yaml       |  4 +-
 docs/deploy-cephfs.md                          | 43 ++++++++++---------
 docs/deploy-rbd.md                             | 39 +++++++++--------
 19 files changed, 81 insertions(+), 121 deletions(-)
 delete mode 100644 deploy/cephfs/docker/Dockerfile
 delete mode 100644 deploy/rbd/docker/Dockerfile

diff --git a/.gitignore b/.gitignore
index f92b6b291..e2736d2cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,14 +1,8 @@
 # build directory
-/output*/
 /_output*/
-/_output
 
 # docker build
-/deploy/rbd/docker/rbdplugin
-/deploy/cephfs/docker/cephfsplugin
-
-# rbdplugin executable
-rbdplugin
+/deploy/cephcsi/image/cephcsi
 
 # Emacs save files
 *~

diff --git a/Makefile b/Makefile
index 3c4bf8d24..17b8b3d15 100644
--- a/Makefile
+++ b/Makefile
@@ -16,17 +16,10 @@
 
 CONTAINER_CMD?=docker
 
-RBD_IMAGE_NAME=$(if $(ENV_RBD_IMAGE_NAME),$(ENV_RBD_IMAGE_NAME),quay.io/cephcsi/rbdplugin)
-RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),canary)
+CSI_IMAGE_NAME=$(if $(ENV_CSI_IMAGE_NAME),$(ENV_CSI_IMAGE_NAME),quay.io/cephcsi/cephcsi)
+CSI_IMAGE_VERSION=$(if $(ENV_CSI_IMAGE_VERSION),$(ENV_CSI_IMAGE_VERSION),canary)
 
-CEPHFS_IMAGE_NAME=$(if $(ENV_CEPHFS_IMAGE_NAME),$(ENV_CEPHFS_IMAGE_NAME),quay.io/cephcsi/cephfsplugin)
-CEPHFS_IMAGE_VERSION=$(if $(ENV_CEPHFS_IMAGE_VERSION),$(ENV_CEPHFS_IMAGE_VERSION),canary)
-
-CSI_IMAGE_NAME?=quay.io/cephcsi/cephcsi
-CSI_IMAGE_VERSION?=canary
-
-$(info rbd image settings: $(RBD_IMAGE_NAME) version $(RBD_IMAGE_VERSION))
-$(info cephfs image settings: $(CEPHFS_IMAGE_NAME) version $(CEPHFS_IMAGE_VERSION))
+$(info cephcsi image settings: $(CSI_IMAGE_NAME) version $(CSI_IMAGE_VERSION))
 
 all: cephcsi
 
@@ -45,26 +38,14 @@ cephcsi:
 	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephcsi ./cmd/
 
 image-cephcsi: cephcsi
-	cp deploy/cephcsi/image/Dockerfile _output
-	$(CONTAINER_CMD) build -t $(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION) _output
+	cp _output/cephcsi deploy/cephcsi/image/cephcsi
+	$(CONTAINER_CMD) build -t $(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION) deploy/cephcsi/image
 
-image-rbdplugin: cephcsi
-	cp _output/cephcsi deploy/rbd/docker/rbdplugin
-	$(CONTAINER_CMD) build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker
+push-image-cephcsi: image-cephcsi
+	$(CONTAINER_CMD) push $(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION)
 
-image-cephfsplugin: cephcsi
-	cp _output/cephcsi deploy/cephfs/docker/cephfsplugin
-	$(CONTAINER_CMD) build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker
-
-push-image-rbdplugin: image-rbdplugin
-	$(CONTAINER_CMD) push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)
-
-push-image-cephfsplugin: image-cephfsplugin
-	$(CONTAINER_CMD) push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)
 
 clean:
 	go clean -r -x
-	rm -f deploy/rbd/docker/rbdplugin
-	rm -f deploy/cephfs/docker/cephfsplugin
-	rm -f _output/rbdplugin
-	rm -f _output/cephfsplugin
+	rm -f deploy/cephcsi/image/cephcsi
+	rm -f _output/cephcsi
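Taken together, the new targets give a single build-and-publish flow. A
sketch (the registry override is hypothetical; without it the name falls
back to `quay.io/cephcsi/cephcsi` as defined above):

```bash
# Build the static cephcsi binary, then bake it into one container image.
make cephcsi
make image-cephcsi

# Optionally point the image at another registry before pushing.
export ENV_CSI_IMAGE_NAME=quay.io/example/cephcsi   # hypothetical name
export ENV_CSI_IMAGE_VERSION=canary
make push-image-cephcsi
```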
diff --git a/cmd/cephcsi.go b/cmd/cephcsi.go
index f34a02dfe..3df44cbd4 100644
--- a/cmd/cephcsi.go
+++ b/cmd/cephcsi.go
@@ -103,7 +103,7 @@ func main() {
 	if err != nil {
 		klog.Fatalln(err) // calls exit
 	}
-
+	klog.Infof("Starting driver type: %v with name: %v", driverType, dname)
 	switch driverType {
 	case rbdType:
 		rbd.PluginFolder = rbd.PluginFolder + dname

diff --git a/deploy.sh b/deploy.sh
index ccca27d0e..c483115bd 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -28,8 +28,7 @@ if [ "${TRAVIS_BRANCH}" == 'csi-v0.3' ]; then
     export ENV_RBD_IMAGE_VERSION='v0.3-canary'
     export ENV_CEPHFS_IMAGE_VERSION='v0.3-canary'
 elif [ "${TRAVIS_BRANCH}" == 'master' ]; then
-    export ENV_RBD_IMAGE_VERSION='canary'
-    export ENV_CEPHFS_IMAGE_VERSION='canary'
+    export ENV_CSI_IMAGE_VERSION='canary'
 else
     echo "!!! Branch ${TRAVIS_BRANCH} is not a deployable branch; exiting"
     exit 0 # Exiting 0 so that this isn't marked as failing
@@ -37,7 +36,7 @@ fi
 
 if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
     "${CONTAINER_CMD:-docker}" login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
-    make push-image-rbdplugin push-image-cephfsplugin
+    make push-image-cephcsi
 
     set -xe

diff --git a/deploy/cephcsi/image/Dockerfile b/deploy/cephcsi/image/Dockerfile
index 3a11c225d..aeec79b7a 100644
--- a/deploy/cephcsi/image/Dockerfile
+++ b/deploy/cephcsi/image/Dockerfile
@@ -1,4 +1,3 @@
-
 FROM ceph/ceph:v14.2
 LABEL maintainers="Ceph-CSI Authors"
 LABEL description="Ceph-CSI Plugin"
@@ -7,8 +6,6 @@ ENV CSIBIN=/usr/local/bin/cephcsi
 
 COPY cephcsi $CSIBIN
 
-RUN chmod +x $CSIBIN && \
-    ln -sf $CSIBIN /usr/local/bin/cephcsi-rbd && \
-    ln -sf $CSIBIN /usr/local/bin/cephcsi-cephfs
+RUN chmod +x $CSIBIN
 
 ENTRYPOINT ["/usr/local/bin/cephcsi"]

diff --git a/deploy/cephfs/docker/Dockerfile b/deploy/cephfs/docker/Dockerfile
deleted file mode 100644
index cdb7c3c16..000000000
--- a/deploy/cephfs/docker/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM centos:7
-LABEL maintainers="Kubernetes Authors"
-LABEL description="CephFS CSI Plugin"
-
-ENV CEPH_VERSION "mimic"
-RUN yum install -y centos-release-ceph && \
-    yum install -y kmod ceph-common ceph-fuse attr && \
-    yum clean all
-
-COPY cephfsplugin /cephfsplugin
-
-RUN chmod +x /cephfsplugin && \
-    mkdir -p /var/log/ceph
-
-ENTRYPOINT ["/cephfsplugin"]

diff --git a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
index 24ffc8420..8810d14eb 100644
--- a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
+++ b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
@@ -25,7 +25,7 @@ spec:
     spec:
       serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
       hostNetwork: true
-      hostPID: true 
+      hostPID: true
       # to use e.g. Rook orchestrated cluster, and mons' FQDN is
       # resolved through k8s service, set dns policy to cluster first
       dnsPolicy: ClusterFirstWithHostNet
@@ -66,6 +66,7 @@ spec:
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args :
            - "--nodeid=$(NODE_ID)"
+           - "--type=cephfs"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
@@ -97,7 +98,7 @@ spec:
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
-             name: host-rootfs 
+             name: host-rootfs
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
@@ -129,7 +130,7 @@ spec:
            path: /dev
        - name: host-rootfs
          hostPath:
-           path: / 
+           path: /
        - name: host-sys
          hostPath:
            path: /sys

diff --git a/deploy/cephfs/helm/templates/provisioner-statefulset.yaml b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml
index 539647ab1..bd1b3f36a 100644
--- a/deploy/cephfs/helm/templates/provisioner-statefulset.yaml
+++ b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml
@@ -64,6 +64,7 @@ spec:
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args :
            - "--nodeid=$(NODE_ID)"
+           - "--type=cephfs"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"

diff --git a/deploy/cephfs/helm/values.yaml b/deploy/cephfs/helm/values.yaml
index cfc64fe54..e631f4d2a 100644
--- a/deploy/cephfs/helm/values.yaml
+++ b/deploy/cephfs/helm/values.yaml
@@ -49,8 +49,9 @@ nodeplugin:
 
   plugin:
     image:
-      repository: quay.io/cephcsi/cephfsplugin
-      tag: v1.0.0
+      repository: quay.io/cephcsi/cephcsi
+      # for stable functionality replace canary with latest release version
+      tag: canary
       pullPolicy: IfNotPresent
       resources: {}

diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
index 473b493b9..9ba6bb9fc 100644
--- a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
+++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
@@ -56,9 +56,11 @@ spec:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
-         image: quay.io/cephcsi/cephfsplugin:v1.0.0
+         # for stable functionality replace canary with latest release version
+         image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
+           - "--type=cephfs"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"

diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
index 849cf57b3..a0bb82c7d 100644
--- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
+++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
@@ -48,9 +48,11 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
-         image: quay.io/cephcsi/cephfsplugin:v1.0.0
+         # for stable functionality replace canary with latest release version
+         image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
+           - "--type=cephfs"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"

diff --git a/deploy/rbd/docker/Dockerfile b/deploy/rbd/docker/Dockerfile
deleted file mode 100644
index 0228a9628..000000000
--- a/deploy/rbd/docker/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM centos:7
-LABEL maintainers="Kubernetes Authors"
-LABEL description="RBD CSI Plugin"
-
-ENV CEPH_VERSION "mimic"
-RUN yum install -y centos-release-ceph && \
-    yum install -y ceph-common e2fsprogs xfsprogs rbd-nbd && \
-    yum clean all
-
-COPY rbdplugin /rbdplugin
-RUN chmod +x /rbdplugin
-ENTRYPOINT ["/rbdplugin"]

diff --git a/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml b/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml
index 0da270116..a27f1c71b 100644
--- a/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml
+++ b/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml
@@ -66,6 +66,7 @@ spec:
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args :
            - "--nodeid=$(NODE_ID)"
+           - "--type=rbd"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"

diff --git a/deploy/rbd/helm/templates/provisioner-statefulset.yaml b/deploy/rbd/helm/templates/provisioner-statefulset.yaml
index 3b5d6fa25..12751b90d 100644
--- a/deploy/rbd/helm/templates/provisioner-statefulset.yaml
+++ b/deploy/rbd/helm/templates/provisioner-statefulset.yaml
@@ -81,6 +81,7 @@ spec:
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args :
            - "--nodeid=$(NODE_ID)"
+           - "--type=rbd"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"

diff --git a/deploy/rbd/helm/values.yaml b/deploy/rbd/helm/values.yaml
index da206d2ef..1c01752f9 100644
--- a/deploy/rbd/helm/values.yaml
+++ b/deploy/rbd/helm/values.yaml
@@ -51,8 +51,9 @@ nodeplugin:
 
   plugin:
     image:
-      repository: quay.io/cephcsi/rbdplugin
-      tag: v1.0.0
+      repository: quay.io/cephcsi/cephcsi
+      # for stable functionality replace canary with latest release version
+      tag: canary
       pullPolicy: IfNotPresent
       resources: {}

diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
index 66e2f2364..8316025f3 100644
--- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
+++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
@@ -71,9 +71,11 @@ spec:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
-         image: quay.io/cephcsi/rbdplugin:v1.0.0
+         # for stable functionality replace canary with latest release version
+         image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
+           - "--type=rbd"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"

diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml
index 322ada347..07431e7ad 100644
--- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml
+++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml
@@ -49,9 +49,11 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
-         image: quay.io/cephcsi/rbdplugin:v1.0.0
+         # for stable functionality replace canary with latest release version
+         image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
+           - "--type=rbd"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
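With all four manifests above pointing at the unified image, a
kubectl-based rollout is just applying the updated files; a sketch,
assuming a cluster where the plugins are deployed by hand:

```bash
# CephFS provisioner and node plugin
kubectl apply -f deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
kubectl apply -f deploy/cephfs/kubernetes/csi-cephfsplugin.yaml

# RBD provisioner and node plugin
kubectl apply -f deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
kubectl apply -f deploy/rbd/kubernetes/csi-rbdplugin.yaml
```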
diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md
index 7f0b67978..497646ca5 100644
--- a/docs/deploy-cephfs.md
+++ b/docs/deploy-cephfs.md
@@ -5,12 +5,12 @@ and attach and mount existing ones to workloads.
 
 ## Building
 
-CSI CephFS plugin can be compiled in the form of a binary file or in the form
+CSI plugin can be compiled in the form of a binary file or in the form
 of a Docker image. When compiled as a binary file, the result is stored in
 `_output/` directory with the name `cephcsi`. When compiled as an image, it's
 stored in the local Docker image store
-with name `cephfsplugin`.
+with name `cephcsi`.
 
 Building binary:
 
 ```bash
 make cephcsi
 ```
 
@@ -21,21 +21,22 @@
 Building Docker image:
 
 ```bash
-make image-cephfsplugin
+make image-cephcsi
 ```
 
 ## Configuration
 
 **Available command line arguments:**
 
-Option | Default value | Description
---------------------|-----------------------|--------------------------------------------------------------------------------------------------------------
-`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
-`--drivername` | `cephfs.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
-`--nodeid` | _empty_ | This node's ID
-`--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed.
-`--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`)
-`--mountcachedir` | _empty_ | volume mount cache info save dir. If left unspecified, the dirver will not record mount info, or it will save mount info and when driver restart it will remount volume it cached.
+| Option | Default value | Description |
+| ------------------- | --------------------- | ----------- |
+| `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
+| `--drivername` | `cephfs.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
+| `--nodeid` | _empty_ | This node's ID |
+| `--type` | _empty_ | driver type: `rbd` or `cephfs`. If set to `rbd`, the driver acts as an RBD plugin; if set to `cephfs`, it acts as a CephFS plugin |
+| `--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed. |
+| `--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) |
+| `--mountcachedir` | _empty_ | volume mount cache info save dir. If left unspecified, the driver will not record mount info; if specified, the driver saves mount info and remounts cached volumes when it restarts. |
 
 **Available environmental variables:**
@@ -48,16 +49,16 @@ is used to define in which namespace you want the configmaps to be stored
 
 **Available volume parameters:**
 
-Parameter | Required | Description
-----------------------------------------------------------------------------------------------------|--------------------------------------------------------|----------------------------------------------------------------------------------------------------
-`monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
-`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors. If both `monitors` and `monValueFromSecret` are set and the monitors set in the secret exists, `monValueFromSecret` takes precedence.
-`mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments.
-`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used.
-`pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created
-`rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume
-`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
-`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-stage-secret-namespace` | for Kubernetes | namespaces of the above Secret objects
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`) |
+| `monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing to the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed; the secret can be updated to pick up the new monitors. If both `monitors` and `monValueFromSecret` are set and the monitors set in the secret exists, `monValueFromSecret` takes precedence. |
+| `mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments. |
+| `provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used. |
+| `pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created |
+| `rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume |
+| `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value |
+| `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-stage-secret-namespace` | for Kubernetes | namespaces of the above Secret objects |
 
 **Required secrets for `provisionVolume=true`:**
 Admin credentials are required for provisioning new volumes
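Putting the documented flags together, a CephFS instance of the unified
driver would be started roughly like this (a sketch; the node id is a
placeholder and the last two flags are optional):

```bash
# Start the unified binary in CephFS mode with the documented flags.
./_output/cephcsi \
    --type=cephfs \
    --nodeid=node-1 \
    --endpoint=unix://tmp/csi.sock \
    --drivername=cephfs.csi.ceph.com \
    --volumemounter=fuse \
    --metadatastorage=k8s_configmap
```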
diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index 415213555..7bf76c6fe 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -5,10 +5,10 @@ attach and mount those to workloads.
 
 ## Building
 
-CSI RBD plugin can be compiled in a form of a binary file or in a form of a
+CSI plugin can be compiled in a form of a binary file or in a form of a
 Docker image. When compiled as a binary file, the result is stored in
 `_output/` directory with the name `cephcsi`. When compiled as an image, it's
-stored in the local Docker image store with name `rbdplugin`.
+stored in the local Docker image store with name `cephcsi`.
 
 Building binary:
 
 ```bash
 make cephcsi
 ```
 
@@ -19,20 +19,21 @@
 Building Docker image:
 
 ```bash
-make image-rbdplugin
+make image-cephcsi
 ```
 
 ## Configuration
 
 **Available command line arguments:**
 
-Option | Default value | Description
------- | ------------- | -----------
-`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
-`--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
-`--nodeid` | _empty_ | This node's ID
-`--containerized` | true | Whether running in containerized mode
-`--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning
+| Option | Default value | Description |
+| ------ | ------------- | ----------- |
+| `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
+| `--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
+| `--nodeid` | _empty_ | This node's ID |
+| `--type` | _empty_ | driver type: `rbd` or `cephfs`. If set to `rbd`, the driver acts as an RBD plugin; if set to `cephfs`, it acts as a CephFS plugin |
+| `--containerized` | true | Whether running in containerized mode |
+| `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
 
 **Available environmental variables:**
@@ -40,15 +41,15 @@
 
 **Available volume parameters:**
 
-Parameter | Required | Description
---------- | -------- | -----------
-`clusterID` | yes | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
-`pool` | yes | Ceph pool into which the RBD image shall be created
-`imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
-`imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
-`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | yes (for Kubernetes) | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
-`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | yes (for Kubernetes) | namespaces of the above Secret objects
-`mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `clusterID` | yes | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use |
+| `pool` | yes | Ceph pool into which the RBD image shall be created |
+| `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format) |
+| `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature) |
+| `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | yes (for Kubernetes) | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value |
+| `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | yes (for Kubernetes) | namespaces of the above Secret objects |
+| `mounter` | no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images |
 
 **NOTE:** An accompanying CSI configuration file, needs to be provided to the
 running pods. Refer to [Creating CSI configuration for RBD based