Removed config maps and replaced them with RADOS omaps

Existing config maps are now replaced with RADOS omaps that store the
mapping between the requested volume names and the RBD image names
backing them.

Further, to detect which cluster, pool, and image a volume ID refers
to, the volume ID encoding has been changed as per the design
specification in the stateless ceph-csi proposal.
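
As an illustrative sketch of the bookkeeping described above (the
object and key names below are hypothetical, not necessarily the ones
the plugin uses), the same kind of mapping can be exercised manually
with the `rados` CLI: a per-pool omap object maps a requested volume
name to the UUID embedded in the backing RBD image name, and a
per-image omap object stores the reverse mapping.

```bash
# Hypothetical object and key names, shown for illustration only.
POOL=rbd
REQ_NAME=pvc-3f1a
IMG_UUID=6f38cb6c-7b0e-11e9-9359-0242ac110003

# Forward mapping: requested volume name -> image UUID
rados -p "$POOL" setomapval csi.volumes.default "csi.volume.${REQ_NAME}" "$IMG_UUID"

# Reverse mapping: image UUID -> requested volume name
rados -p "$POOL" setomapval "csi.volume.${IMG_UUID}" csi.volname "$REQ_NAME"

# Inspect the stored mappings
rados -p "$POOL" listomapvals csi.volumes.default
rados -p "$POOL" getomapval "csi.volume.${IMG_UUID}" csi.volname
```

The volume ID handed back to the CO then only needs to encode the
cluster, pool, and image identifiers to locate these objects on later
requests.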

Additional changes and updates:
- Updated documentation
- Updated manifests
- Updated Helm chart
- Addressed a few csi-test failures

Signed-off-by: ShyamsundarR <srangana@redhat.com>
Authored by ShyamsundarR on 2019-04-22 17:35:39 -04:00, committed by mergify[bot]
parent f60a07ae82
commit d02e50aa9b
40 changed files with 2379 additions and 1373 deletions


@ -42,16 +42,16 @@ var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "", "name of the driver") driverName = flag.String("drivername", "", "name of the driver")
nodeID = flag.String("nodeid", "", "node id") nodeID = flag.String("nodeid", "", "node id")
metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
// rbd related flags // rbd related flags
containerized = flag.Bool("containerized", true, "whether run as containerized") containerized = flag.Bool("containerized", true, "whether run as containerized")
configRoot = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+ instanceID = flag.String("instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+
" cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets") " instances, when sharing Ceph clusters across CSI instances for provisioning")
// cephfs related flags // cephfs related flags
volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')") volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir") mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir")
metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
) )
func init() { func init() {
@ -107,13 +107,8 @@ func main() {
switch driverType { switch driverType {
case rbdType: case rbdType:
rbd.PluginFolder = rbd.PluginFolder + dname rbd.PluginFolder = rbd.PluginFolder + dname
cp, err := util.CreatePersistanceStorage(
rbd.PluginFolder, *metadataStorage, dname)
if err != nil {
os.Exit(1)
}
driver := rbd.NewDriver() driver := rbd.NewDriver()
driver.Run(dname, *nodeID, *endpoint, *configRoot, *containerized, cp) driver.Run(dname, *nodeID, *endpoint, *instanceID, *containerized)
case cephfsType: case cephfsType:
cephfs.PluginFolder = cephfs.PluginFolder + dname cephfs.PluginFolder = cephfs.PluginFolder + dname


@ -4,7 +4,7 @@ appVersion: "1.0.0"
description: "Container Storage Interface (CSI) driver, description: "Container Storage Interface (CSI) driver,
provisioner, and attacher for Ceph cephfs" provisioner, and attacher for Ceph cephfs"
name: ceph-csi-cephfs name: ceph-csi-cephfs
version: 0.6.0 version: 0.7.0
keywords: keywords:
- ceph - ceph
- cephfs - cephfs


@ -4,7 +4,7 @@ appVersion: "1.0.0"
description: "Container Storage Interface (CSI) driver, description: "Container Storage Interface (CSI) driver,
provisioner, snapshotter, and attacher for Ceph RBD" provisioner, snapshotter, and attacher for Ceph RBD"
name: ceph-csi-rbd name: ceph-csi-rbd
version: 0.6.0 version: 0.7.0
keywords: keywords:
- ceph - ceph
- rbd - rbd


@ -0,0 +1,14 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.configMapName | quote }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.json: |-
[]


@ -70,7 +70,6 @@ spec:
- "--v=5" - "--v=5"
- "--drivername=$(DRIVER_NAME)" - "--drivername=$(DRIVER_NAME)"
- "--containerized=true" - "--containerized=true"
- "--metadatastorage=k8s_configmap"
env: env:
- name: HOST_ROOTFS - name: HOST_ROOTFS
value: "/rootfs" value: "/rootfs"
@ -101,6 +100,8 @@ spec:
- mountPath: /lib/modules - mountPath: /lib/modules
name: lib-modules name: lib-modules
readOnly: true readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
resources: resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes: volumes:
@ -132,6 +133,9 @@ spec:
- name: lib-modules - name: lib-modules
hostPath: hostPath:
path: /lib/modules path: /lib/modules
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.nodeplugin.affinity -}} {{- if .Values.nodeplugin.affinity -}}
affinity: affinity:
{{ toYaml .Values.nodeplugin.affinity . | indent 8 }} {{ toYaml .Values.nodeplugin.affinity . | indent 8 }}


@ -85,7 +85,6 @@ spec:
- "--v=5" - "--v=5"
- "--drivername=$(DRIVER_NAME)" - "--drivername=$(DRIVER_NAME)"
- "--containerized=true" - "--containerized=true"
- "--metadatastorage=k8s_configmap"
env: env:
- name: HOST_ROOTFS - name: HOST_ROOTFS
value: "/rootfs" value: "/rootfs"
@ -103,6 +102,8 @@ spec:
mountPath: {{ .Values.socketDir }} mountPath: {{ .Values.socketDir }}
- name: host-rootfs - name: host-rootfs
mountPath: "/rootfs" mountPath: "/rootfs"
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
resources: resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes: volumes:
@ -112,6 +113,9 @@ spec:
- name: host-rootfs - name: host-rootfs
hostPath: hostPath:
path: / path: /
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.provisioner.affinity -}} {{- if .Values.provisioner.affinity -}}
affinity: affinity:
{{ toYaml .Values.provisioner.affinity . | indent 8 }} {{ toYaml .Values.provisioner.affinity . | indent 8 }}


@ -18,6 +18,7 @@ socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
driverName: rbd.csi.ceph.com driverName: rbd.csi.ceph.com
configMapName: ceph-csi-config
attacher: attacher:
name: attacher name: attacher


@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
data:
config.json: |-
[]
metadata:
name: ceph-csi-config


@ -78,8 +78,6 @@ spec:
- "--v=5" - "--v=5"
- "--drivername=rbd.csi.ceph.com" - "--drivername=rbd.csi.ceph.com"
- "--containerized=true" - "--containerized=true"
- "--metadatastorage=k8s_configmap"
- "--configroot=k8s_objects"
env: env:
- name: HOST_ROOTFS - name: HOST_ROOTFS
value: "/rootfs" value: "/rootfs"
@ -87,10 +85,6 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT - name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
@ -106,6 +100,8 @@ spec:
- mountPath: /lib/modules - mountPath: /lib/modules
name: lib-modules name: lib-modules
readOnly: true readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
volumes: volumes:
- name: host-dev - name: host-dev
hostPath: hostPath:
@ -123,3 +119,6 @@ spec:
hostPath: hostPath:
path: /var/lib/kubelet/plugins/rbd.csi.ceph.com path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
type: DirectoryOrCreate type: DirectoryOrCreate
- name: ceph-csi-config
configMap:
name: ceph-csi-config


@ -56,8 +56,6 @@ spec:
- "--v=5" - "--v=5"
- "--drivername=rbd.csi.ceph.com" - "--drivername=rbd.csi.ceph.com"
- "--containerized=true" - "--containerized=true"
- "--metadatastorage=k8s_configmap"
- "--configroot=k8s_objects"
env: env:
- name: HOST_ROOTFS - name: HOST_ROOTFS
value: "/rootfs" value: "/rootfs"
@ -65,10 +63,6 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT - name: CSI_ENDPOINT
value: unix:///csi/csi.sock value: unix:///csi/csi.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
@ -90,6 +84,8 @@ spec:
- mountPath: /lib/modules - mountPath: /lib/modules
name: lib-modules name: lib-modules
readOnly: true readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
volumes: volumes:
- name: plugin-dir - name: plugin-dir
hostPath: hostPath:
@ -119,3 +115,6 @@ spec:
- name: lib-modules - name: lib-modules
hostPath: hostPath:
path: /lib/modules path: /lib/modules
- name: ceph-csi-config
configMap:
name: ceph-csi-config


@ -32,40 +32,32 @@ Option | Default value | Description
`--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) `--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID `--nodeid` | _empty_ | This node's ID
`--containerized` | true | Whether running in containerized mode `--containerized` | true | Whether running in containerized mode
`--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning
`--configroot` | `/etc/csi-config` | Directory in which CSI specific Ceph cluster configurations are present, OR the value `k8s_objects` if present as kubernetes secrets"
**Available environmental variables:** **Available environmental variables:**
`HOST_ROOTFS`: rbdplugin searches `/proc` directory under the directory set by `HOST_ROOTFS`. `HOST_ROOTFS`: rbdplugin searches `/proc` directory under the directory set by `HOST_ROOTFS`.
`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).
`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store,
`POD_NAMESPACE` is used to define in which namespace you want
the configmaps to be stored
**Available volume parameters:** **Available volume parameters:**
Parameter | Required | Description Parameter | Required | Description
--------- | -------- | ----------- --------- | -------- | -----------
`monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`) `clusterID` | yes | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
`monValueFromSecret` | one of `monitors`, `clusterID` or and `monValueFromSecret` must be set | a string pointing the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors.
`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
`pool` | yes | Ceph pool into which the RBD image shall be created `pool` | yes | Ceph pool into which the RBD image shall be created
`imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format) `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
`imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature) `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | yes (for Kubernetes) | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | yes (for Kubernetes) | namespaces of the above Secret objects
`mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images `mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images
NOTE: If `clusterID` parameter is used, then an accompanying Ceph cluster **NOTE:** An accompanying CSI configuration file, needs to be provided to the
configuration secret or config files needs to be provided to the running pods. running pods. Refer to [Creating CSI configuration for RBD based
Refer to [Cluster ID based configuration](../examples/README.md#cluster-id-based-configuration) provisioning](../examples/README.md#creating-csi-configuration-for-rbd-based-provisioning)
for more information. A suggested way to populate the clusterID is to use the for more information.
output of `ceph fsid` of the Ceph cluster to be used for provisioning.
**NOTE:** A suggested way to populate and retain uniqueness of the clusterID is
to use the output of `ceph fsid` of the Ceph cluster to be used for
provisioning.
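
For example, a minimal sketch of picking and reusing the clusterID (the fsid
value shown is only a placeholder):

```bash
# The same value must be used for the "clusterID" StorageClass parameter and
# for the "clusterID" entry in the CSI configuration file.
CLUSTER_ID="$(ceph fsid)"
echo "${CLUSTER_ID}"
# c9f2f1a0-1b2c-4d5e-8f90-123456789abc
```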
**Required secrets:** **Required secrets:**
@ -73,11 +65,6 @@ Admin credentials are required for provisioning new RBD images `ADMIN_NAME`:
`ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the `ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the
client with admin privileges, and the value is its password client with admin privileges, and the value is its password
If clusterID is specified, then a secret with various keys and values as
specified in `examples/rbd/template-ceph-cluster-ID-secret.yaml` needs to be
created, with the secret name matching the string value provided as the
`clusterID`.
## Deployment with Kubernetes ## Deployment with Kubernetes
Requires Kubernetes 1.11 Requires Kubernetes 1.11
@ -101,6 +88,18 @@ Those manifests deploy service accounts, cluster roles and cluster role
bindings. These are shared for both RBD and CephFS CSI plugins, as they require bindings. These are shared for both RBD and CephFS CSI plugins, as they require
the same permissions. the same permissions.
**Deploy ConfigMap for CSI plugins:**
```bash
kubectl create -f csi-config-map.yaml
```
The config map deploys an empty CSI configuration that is mounted as a volume
within the Ceph CSI plugin pods. To add a specific Ceph cluster's configuration
details, refer to [Creating CSI configuration for RBD based
provisioning](../examples/README.md#creating-csi-configuration-for-rbd-based-provisioning)
for more information.
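
For reference, a minimal sketch of inspecting and updating the deployed
configuration (assuming the config map name `ceph-csi-config` used by the
manifest above):

```bash
# Show the currently deployed (initially empty) CSI configuration
kubectl get configmap ceph-csi-config -o yaml

# After filling csi-config-map.yaml with real clusterID and monitor values,
# replace the deployed copy; running plugin pods pick up the change without
# needing a restart
kubectl replace -f csi-config-map.yaml
```
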
**Deploy CSI sidecar containers:** **Deploy CSI sidecar containers:**
```bash ```bash
@ -134,7 +133,10 @@ service/csi-rbdplugin-provisioner ClusterIP 10.104.2.130 <none> 123
... ...
``` ```
You can try deploying a demo pod from `examples/rbd` to test the deployment further. Once the CSI plugin configuration is updated with details from a Ceph cluster of
choice, you can try deploying a demo pod from examples/rbd using the
instructions [provided](../examples/README.md#deploying-the-storage-class) to
test the deployment further.
## Deployment with Helm ## Deployment with Helm


@ -1,5 +1,7 @@
# How to test RBD and CephFS plugins with Kubernetes 1.13 # How to test RBD and CephFS plugins with Kubernetes 1.13
## Deploying Ceph-CSI services
Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and
`plugin-teardown.sh` helper scripts. You can use those to help you `plugin-teardown.sh` helper scripts. You can use those to help you
deploy/teardown RBACs, sidecar containers and the plugin in one go. deploy/teardown RBACs, sidecar containers and the plugin in one go.
@ -7,16 +9,45 @@ By default, they look for the YAML manifests in
`../../deploy/{rbd,cephfs}/kubernetes`. `../../deploy/{rbd,cephfs}/kubernetes`.
You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`. You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.
## Creating CSI configuration for RBD based provisioning
**NOTE:** This section is not required for cephfs based provisioning, and SHOULD
be skipped.
For RBD based provisioning, the CSI plugin requires configuration information
about the Ceph cluster(s) that would host the RBD based block devices. This is
provided by adding a per-cluster identifier (referred to as the clusterID) and
the required monitor details for that cluster, as in the provided [sample config
map](./rbd/csi-config-map-sample.yaml).
Gather the following information from the Ceph cluster(s) of choice,
* Ceph monitor list
* Typically in the output of `ceph mon dump`
* Used to prepare a list of `monitors` in the CSI configuration file
* Ceph Cluster fsid
* If choosing to use the Ceph cluster fsid as the unique value of clusterID,
* Output of `ceph fsid`
* Alternatively, choose a `<cluster-id>` value that is distinct per Ceph
cluster in use by this kubernetes cluster
Update the [sample config map](./rbd/csi-config-map-sample.yaml) with values
from a Ceph cluster, replacing `<cluster-id>` with the chosen clusterID, to
create the manifest for the config map, which can then be updated in the
cluster using the following command,
* `kubectl replace -f rbd/csi-config-map-sample.yaml`
Storage class and snapshot class, using `<cluster-id>` as the value for the
option `clusterID`, can now be created on the cluster.
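
Putting the above together, a minimal sketch of the workflow (the command
output shown is illustrative and varies across Ceph releases):

```bash
# 1. A convenient, unique clusterID: the Ceph cluster fsid
ceph fsid
# c9f2f1a0-1b2c-4d5e-8f90-123456789abc

# 2. Monitor addresses for the same cluster
ceph mon dump
# 0: 192.168.100.1:6789/0 mon.a
# 1: 192.168.100.2:6789/0 mon.b

# 3. Substitute <cluster-id> and <MONValue#> in rbd/csi-config-map-sample.yaml
#    with the values gathered above, then update the config map in the cluster
kubectl replace -f rbd/csi-config-map-sample.yaml
```
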
## Deploying the storage class
Once the plugin is successfully deployed, you'll need to customize Once the plugin is successfully deployed, you'll need to customize
`storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster `storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster
setup. setup.
Please consult the documentation for info about available parameters. Please consult the documentation for info about available parameters.
**NOTE:** See section
[Cluster ID based configuration](#cluster-id-based-configuration) if using
the `clusterID` instead of `monitors` or `monValueFromSecret` option in the
storage class for RBD based provisioning before proceeding.
After configuring the secrets, monitors, etc. you can deploy a After configuring the secrets, monitors, etc. you can deploy a
testing Pod mounting a RBD image / CephFS volume: testing Pod mounting a RBD image / CephFS volume:
@ -32,7 +63,7 @@ Other helper scripts:
* `logs.sh` output of the plugin * `logs.sh` output of the plugin
* `exec-bash.sh` logs into the plugin's container and runs bash * `exec-bash.sh` logs into the plugin's container and runs bash
## How to test RBD Snapshot feature ### How to test RBD Snapshot feature
Before continuing, make sure you enabled the required Before continuing, make sure you enabled the required
feature gate `VolumeSnapshotDataSource=true` in your Kubernetes cluster. feature gate `VolumeSnapshotDataSource=true` in your Kubernetes cluster.
@ -42,7 +73,7 @@ In the `examples/rbd` directory you will find two files related to snapshots:
[snapshot.yaml](./rbd/snapshot.yaml). [snapshot.yaml](./rbd/snapshot.yaml).
Once you created your RBD volume, you'll need to customize at least Once you created your RBD volume, you'll need to customize at least
`snapshotclass.yaml` and make sure the `monitors` and `pool` parameters match `snapshotclass.yaml` and make sure the `clusterid` and `pool` parameters match
your Ceph cluster setup. your Ceph cluster setup.
If you followed the documentation to create the rbdplugin, you shouldn't If you followed the documentation to create the rbdplugin, you shouldn't
have to edit any other file. have to edit any other file.
@ -120,7 +151,7 @@ kubectl create -f pvc-restore.yaml
kubectl create -f pod-restore.yaml kubectl create -f pod-restore.yaml
``` ```
## How to test RBD MULTI_NODE_MULTI_WRITER BLOCK feature ### How to test RBD MULTI_NODE_MULTI_WRITER BLOCK feature
Requires feature-gates: `BlockVolume=true` `CSIBlockVolume=true` Requires feature-gates: `BlockVolume=true` `CSIBlockVolume=true`
@ -218,37 +249,3 @@ Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
``` ```
## Cluster ID based configuration
Before creating a storage class that uses the option `clusterID` to refer to a
Ceph cluster, the following actions need to be completed.
Get the following information from the Ceph cluster,
* Admin ID and key, that has privileges to perform CRUD operations on the Ceph
cluster and pools of choice
* Key is typically the output of, `ceph auth get-key client.admin` where
`admin` is the Admin ID
* Used to substitute admin/user id and key values in the files below
* Ceph monitor list
* Typically in the output of `ceph mon dump`
* Used to prepare comma separated MON list where required in the files below
* Ceph Cluster fsid
* If choosing to use the Ceph cluster fsid as the unique value of clusterID,
* Output of `ceph fsid`
* Used to substitute `<cluster-id>` references in the files below
Update the template
[template-ceph-cluster-ID-secret.yaml](./rbd/template-ceph-cluster-ID-secret.yaml)
with values from
a Ceph cluster and replace `<cluster-id>` with the chosen clusterID to create
the following secret,
* `kubectl create -f rbd/template-ceph-cluster-ID-secret.yaml`
Storage class and snapshot class, using `<cluster-id>` as the value for the
option `clusterID`, can now be created on the cluster.
Remaining steps to test functionality remains the same as mentioned in the
sections above.


@ -0,0 +1,32 @@
---
# This is a sample config map that helps define a Ceph cluster configuration
# as required by the CSI plugins.
apiVersion: v1
kind: ConfigMap
# The <cluster-id> is used by the CSI plugin to uniquely identify and use a
# Ceph cluster, the value MUST match the value provided as `clusterID` in the
# StorageClass
# The <MONValue#> fields are the various monitor addresses for the Ceph cluster
# identified by the <cluster-id>
# If a CSI plugin is using more than one Ceph cluster, repeat the section for
# each such cluster in use.
# To add more clusters or edit MON addresses in an existing config map, use
# the `kubectl replace` command.
# NOTE: Changes to the config map are automatically propagated to the running
# pods, thus restarting existing pods that use the config map is NOT required
# when the config map is edited.
data:
config.json: |-
[
{
"clusterID": "<cluster-id>",
"monitors": [
"<MONValue1>",
"<MONValue2>",
...
"<MONValueN>"
]
}
]
metadata:
name: ceph-csi-config


@ -9,6 +9,3 @@ data:
admin: BASE64-ENCODED-PASSWORD admin: BASE64-ENCODED-PASSWORD
# Key value corresponds to a user name defined in ceph cluster # Key value corresponds to a user name defined in ceph cluster
kubernetes: BASE64-ENCODED-PASSWORD kubernetes: BASE64-ENCODED-PASSWORD
# if monValueFromSecret is set to "monitors", uncomment the
# following and set the mon there
# monitors: BASE64-ENCODED-Comma-Delimited-Mons


@ -5,19 +5,16 @@ metadata:
name: csi-rbdplugin-snapclass name: csi-rbdplugin-snapclass
snapshotter: rbd.csi.ceph.com snapshotter: rbd.csi.ceph.com
parameters: parameters:
pool: rbd
# Comma separated list of Ceph monitors
# if using FQDN, make sure csi plugin's dns policy is appropriate.
monitors: mon1:port,mon2:port,...
# OR,
# String representing a Ceph cluster to provision storage from. # String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning, # Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for # cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use. # the lifetime of the StorageClass in use.
# If using clusterID, ensure to create a secret, as in # Ensure to create an entry in the config map named ceph-csi-config, based on
# template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to # csi-config-map-sample.yaml, to accompany the string chosen to
# represent the Ceph cluster in clusterID # represent the Ceph cluster in clusterID below
# clusterID: <cluster-id> clusterID: <cluster-id>
pool: rbd
csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
csi.storage.k8s.io/snapshotter-secret-namespace: default csi.storage.k8s.io/snapshotter-secret-namespace: default


@ -5,23 +5,14 @@ metadata:
name: csi-rbd-sc name: csi-rbd-sc
provisioner: rbd.csi.ceph.com provisioner: rbd.csi.ceph.com
parameters: parameters:
# Comma separated list of Ceph monitors
# if using FQDN, make sure csi plugin's dns policy is appropriate.
monitors: mon1:port,mon2:port,...
# OR,
# String representing a Ceph cluster to provision storage from. # String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning, # Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for # cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use. # the lifetime of the StorageClass in use.
# If using clusterID, ensure to create a secret, as in # Ensure to create an entry in the config map named ceph-csi-config, based on
# template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to # csi-config-map-sample.yaml, to accompany the string chosen to
# represent the Ceph cluster in clusterID # represent the Ceph cluster in clusterID below
# clusterID: <cluster-id> clusterID: <cluster-id>
# OR,
# if "monitors" parameter is not set, driver to get monitors from same
# secret as admin/user credentials. "monValueFromSecret" provides the
# key in the secret whose value is the mons
# monValueFromSecret: "monitors"
# Ceph pool into which the RBD image shall be created # Ceph pool into which the RBD image shall be created
pool: rbd pool: rbd
@ -34,20 +25,15 @@ parameters:
imageFeatures: layering imageFeatures: layering
# The secrets have to contain Ceph admin credentials. # The secrets have to contain Ceph admin credentials.
# NOTE: If using "clusterID" instead of "monitors" above, the following
# secrets MAY be added to the ceph-cluster-<cluster-id> secret and skipped
# here
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: default csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-publish-secret-namespace: default csi.storage.k8s.io/node-publish-secret-namespace: default
# Ceph users for operating RBD # Ceph users for operating RBD
# NOTE: If using "clusterID" instead of "monitors" above, the following
# IDs MAY be added to the ceph-cluster-<cluster-id> secret and skipped
# here
adminid: admin adminid: admin
userid: kubernetes userid: kubernetes
# uncomment the following to use rbd-nbd as mounter on supported nodes # uncomment the following to use rbd-nbd as mounter on supported nodes
# mounter: rbd-nbd # mounter: rbd-nbd
reclaimPolicy: Delete reclaimPolicy: Delete


@ -1,36 +0,0 @@
---
# This is a template secret that helps define a Ceph cluster configuration
# as required by the CSI driver. This is used when a StorageClass has the
# "clusterID" defined as one of the parameters, to provide the CSI instance
# Ceph cluster configuration information.
apiVersion: v1
kind: Secret
metadata:
# The <cluster-id> is used by the CSI plugin to uniquely identify and use a
# Ceph cluster, the value MUST match the value provided as `clusterID` in the
# StorageClass
name: ceph-cluster-<cluster-id>
namespace: default
data:
# Base64 encoded and comma separated Ceph cluster monitor list
# - Typically output of: `echo -n "mon1:port,mon2:port,..." | base64`
monitors: <BASE64-ENCODED-MONLIST>
# Base64 encoded and comma separated list of pool names from which volumes
# can be provisioned
pools: <BASE64-ENCODED-POOLIST>
# Base64 encoded admin ID to use for provisioning
# - Typically output of: `echo -n "<admin-id>" | base64`
# Substitute the entire string including angle braces, with the base64 value
adminid: <BASE64-ENCODED-ID>
# Base64 encoded key of the provisioner admin ID
# - Output of: `ceph auth get-key client.<admin-id> | base64`
# Substitute the entire string including angle braces, with the base64 value
adminkey: <BASE64-ENCODED-PASSWORD>
# Base64 encoded user ID to use for publishing
# - Typically output of: `echo -n "<admin-id>" | base64`
# Substitute the entire string including angle braces, with the base64 value
userid: <BASE64-ENCODED-ID>
# Base64 encoded key of the publisher user ID
# - Output of: `ceph auth get-key client.<admin-id> | base64`
# Substitute the entire string including angle braces, with the base64 value
userkey: <BASE64-ENCODED-PASSWORD>


@ -1,33 +0,0 @@
---
# This is a patch to the existing daemonset deployment of CSI rbdplugin.
#
# This is to be used when using `clusterID` instead of monitors or
# monValueFromSecret in the StorageClass to specify the Ceph cluster to
# provision storage from, AND when the value of `--configroot` option to the
# CSI pods is NOT "k8s_objects".
#
# This patch file, patches in the specified secret for the 'clusterID' as a
# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes
# secrets.
#
# Post substituting the <cluster-id> in all places execute,
# `kubectl patch daemonset csi-rbdplugin --patch\
# "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the daemonset deployment.
#
# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\
# "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the statefulset deployment.
spec:
template:
spec:
containers:
- name: csi-rbdplugin
volumeMounts:
- name: ceph-cluster-<cluster-id>
mountPath: "/etc/csi-config/ceph-cluster-<cluster-id>"
readOnly: true
volumes:
- name: ceph-cluster-<cluster-id>
secret:
secretName: ceph-cluster-<cluster-id>


@ -18,6 +18,8 @@ package cephfs
import ( import (
"fmt" "fmt"
"github.com/ceph/ceph-csi/pkg/util"
) )
const ( const (
@ -72,7 +74,7 @@ func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID
"-m", volOptions.Monitors, "-m", volOptions.Monitors,
"-n", adminID, "-n", adminID,
"--key="+adminCr.key, "--key="+adminCr.key,
"-c", cephConfigPath, "-c", util.CephConfigPath,
"-f", "json", "-f", "json",
"auth", "get", userID, "auth", "get", userID,
) )
@ -85,7 +87,7 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
"-m", volOptions.Monitors, "-m", volOptions.Monitors,
"-n", adminID, "-n", adminID,
"--key="+adminCr.key, "--key="+adminCr.key,
"-c", cephConfigPath, "-c", util.CephConfigPath,
"-f", "json", "-f", "json",
"auth", "get-or-create", userID, "auth", "get-or-create", userID,
// User capabilities // User capabilities
@ -102,7 +104,7 @@ func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
"-m", volOptions.Monitors, "-m", volOptions.Monitors,
"-n", adminID, "-n", adminID,
"--key="+adminCr.key, "--key="+adminCr.key,
"-c", cephConfigPath, "-c", util.CephConfigPath,
"auth", "rm", userID, "auth", "rm", userID,
) )
} }


@ -101,7 +101,7 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
klog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter) klog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)
if err := writeCephConfig(); err != nil { if err := util.WriteCephConfig(); err != nil {
klog.Fatalf("failed to write ceph configuration file: %v", err) klog.Fatalf("failed to write ceph configuration file: %v", err)
} }


@ -25,6 +25,8 @@ import (
"strconv" "strconv"
"sync" "sync"
"github.com/ceph/ceph-csi/pkg/util"
"k8s.io/klog" "k8s.io/klog"
) )
@ -115,7 +117,7 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) er
args := [...]string{ args := [...]string{
mountPoint, mountPoint,
"-m", volOptions.Monitors, "-m", volOptions.Monitors,
"-c", cephConfigPath, "-c", util.CephConfigPath,
"-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key, "-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key,
"-r", volOptions.RootPath, "-r", volOptions.RootPath,
"-o", "nonempty", "-o", "nonempty",


@ -18,20 +18,12 @@ package rbd
import ( import (
"fmt" "fmt"
"os/exec"
"sort"
"strconv"
"syscall"
csicommon "github.com/ceph/ceph-csi/pkg/csi-common" csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util" "github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -46,33 +38,6 @@ const (
// controller server spec. // controller server spec.
type ControllerServer struct { type ControllerServer struct {
*csicommon.DefaultControllerServer *csicommon.DefaultControllerServer
MetadataStore util.CachePersister
}
var (
rbdVolumes = map[string]rbdVolume{}
rbdSnapshots = map[string]rbdSnapshot{}
)
// LoadExDataFromMetadataStore loads the rbd volume and snapshot
// info from metadata store
func (cs *ControllerServer) LoadExDataFromMetadataStore() error {
vol := &rbdVolume{}
// nolint
cs.MetadataStore.ForAll("csi-rbd-vol-", vol, func(identifier string) error {
rbdVolumes[identifier] = *vol
return nil
})
snap := &rbdSnapshot{}
// nolint
cs.MetadataStore.ForAll("csi-rbd-(.*)-snap-", snap, func(identifier string) error {
rbdSnapshots[identifier] = *snap
return nil
})
klog.Infof("Loaded %d volumes and %d snapshots from metadata store", len(rbdVolumes), len(rbdSnapshots))
return nil
} }
func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) error { func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) error {
@ -87,10 +52,17 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro
if req.VolumeCapabilities == nil { if req.VolumeCapabilities == nil {
return status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") return status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
} }
options := req.GetParameters()
if value, ok := options["clusterID"]; !ok || len(value) == 0 {
return status.Error(codes.InvalidArgument, "Missing or empty cluster ID to provision volume from")
}
if value, ok := options["pool"]; !ok || len(value) == 0 {
return status.Error(codes.InvalidArgument, "Missing or empty pool name to provision volume from")
}
return nil return nil
} }
func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { func (cs *ControllerServer) parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
// TODO (sbezverk) Last check for not exceeding total storage capacity // TODO (sbezverk) Last check for not exceeding total storage capacity
isMultiNode := false isMultiNode := false
@ -111,38 +83,28 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
} }
// if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks // if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks
rbdVol, err := getRBDVolumeOptions(req.GetParameters(), (isMultiNode && isBlock)) rbdVol, err := genVolFromVolumeOptions(req.GetParameters(), (isMultiNode && isBlock))
if err != nil { if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error()) return nil, status.Error(codes.InvalidArgument, err.Error())
} }
// Generating Volume Name and Volume ID, as according to CSI spec they MUST be different rbdVol.RequestName = req.GetName()
volName := req.GetName()
uniqueID := uuid.NewUUID().String()
rbdVol.VolName = volName
volumeID := "csi-rbd-vol-" + uniqueID
rbdVol.VolID = volumeID
// Volume Size - Default is 1 GiB // Volume Size - Default is 1 GiB
volSizeBytes := int64(oneGB) volSizeBytes := int64(oneGB)
if req.GetCapacityRange() != nil { if req.GetCapacityRange() != nil {
volSizeBytes = req.GetCapacityRange().GetRequiredBytes() volSizeBytes = req.GetCapacityRange().GetRequiredBytes()
} }
rbdVol.VolSize = util.RoundUpToMiB(volSizeBytes) // always round up the request size in bytes to the nearest MiB
rbdVol.VolSize = util.MiB * util.RoundUpToMiB(volSizeBytes)
// NOTE: rbdVol does not contain VolID and RbdImageName populated, everything
// else is populated post create request parsing
return rbdVol, nil return rbdVol, nil
} }
func storeVolumeMetadata(vol *rbdVolume, cp util.CachePersister) error { // CreateVolume creates the volume in backend
if err := cp.Create(vol.VolID, vol); err != nil {
klog.Errorf("failed to store metadata for volume %s: %v", vol.VolID, err)
return err
}
return nil
}
// CreateVolume creates the volume in backend and store the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateVolumeReq(req); err != nil { if err := cs.validateVolumeReq(req); err != nil {
@ -155,50 +117,46 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
} }
}() }()
// Need to check for already existing volume name, and if found rbdVol, err := cs.parseVolCreateRequest(req)
// check for the requested capacity and already allocated capacity if err != nil {
if exVol, err := getRBDVolumeByName(req.GetName()); err == nil { return nil, err
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize >= req.GetCapacityRange().GetRequiredBytes() {
// existing volume is compatible with new request and should be reused.
if err = storeVolumeMetadata(exVol, cs.MetadataStore); err != nil {
return nil, status.Error(codes.Internal, err.Error())
} }
// TODO (sbezverk) Do I need to make sure that RBD volume still exists? found, err := checkVolExists(rbdVol, req.GetSecrets())
if err != nil {
if _, ok := err.(ErrVolNameConflict); ok {
return nil, status.Error(codes.AlreadyExists, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
if found {
return &csi.CreateVolumeResponse{ return &csi.CreateVolumeResponse{
Volume: &csi.Volume{ Volume: &csi.Volume{
VolumeId: exVol.VolID, VolumeId: rbdVol.VolID,
CapacityBytes: exVol.VolSize, CapacityBytes: rbdVol.VolSize,
VolumeContext: req.GetParameters(), VolumeContext: req.GetParameters(),
}, },
}, nil }, nil
} }
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name: %s but with different size already exist", req.GetName())
}
rbdVol, err := parseVolCreateRequest(req) err = reserveVol(rbdVol, req.GetSecrets())
if err != nil { if err != nil {
return nil, err
}
// Check if there is already RBD image with requested name
err = cs.checkRBDStatus(rbdVol, req, int(rbdVol.VolSize))
if err != nil {
return nil, err
}
// store volume size in bytes (snapshot and check existing volume needs volume
// size in bytes)
rbdVol.VolSize = rbdVol.VolSize * util.MiB
rbdVolumes[rbdVol.VolID] = *rbdVol
if err = storeVolumeMetadata(rbdVol, cs.MetadataStore); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
defer func() {
if err != nil {
errDefer := unreserveVol(rbdVol, req.GetSecrets())
if errDefer != nil {
klog.Warningf("failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
}
}
}()
err = cs.createBackingImage(rbdVol, req, util.RoundUpToMiB(rbdVol.VolSize))
if err != nil {
return nil, err
}
return &csi.CreateVolumeResponse{ return &csi.CreateVolumeResponse{
Volume: &csi.Volume{ Volume: &csi.Volume{
@ -209,27 +167,24 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
}, nil }, nil
} }
func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeMiB int) error { func (cs *ControllerServer) createBackingImage(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeMiB int64) error {
var err error var err error
// Check if there is already RBD image with requested name
//nolint
found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets())
if !found {
// if VolumeContentSource is not nil, this request is for snapshot // if VolumeContentSource is not nil, this request is for snapshot
if req.VolumeContentSource != nil { if req.VolumeContentSource != nil {
if err = cs.checkSnapshot(req, rbdVol); err != nil { if err = cs.checkSnapshot(req, rbdVol); err != nil {
return err return err
} }
} else { } else {
err = createRBDImage(rbdVol, volSizeMiB, rbdVol.AdminID, req.GetSecrets()) err = createImage(rbdVol, volSizeMiB, rbdVol.AdminID, req.GetSecrets())
if err != nil { if err != nil {
klog.Warningf("failed to create volume: %v", err) klog.Warningf("failed to create volume: %v", err)
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
} }
klog.V(4).Infof("create volume %s", rbdVol.VolName) klog.V(4).Infof("created image %s", rbdVol.RbdImageName)
}
} }
return nil return nil
} }
func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) error { func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) error {
@ -244,15 +199,18 @@ func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *
} }
rbdSnap := &rbdSnapshot{} rbdSnap := &rbdSnapshot{}
if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil { if err := genSnapFromSnapID(rbdSnap, snapshotID, req.GetSecrets()); err != nil {
return status.Error(codes.NotFound, err.Error()) if _, ok := err.(ErrSnapNotFound); !ok {
return status.Error(codes.Internal, err.Error())
}
return status.Error(codes.InvalidArgument, "Missing requested Snapshot ID")
} }
err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets()) err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets())
if err != nil { if err != nil {
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
} }
klog.V(4).Infof("create volume %s from snapshot %s", req.GetName(), rbdSnap.SnapName) klog.V(4).Infof("create volume %s from snapshot %s", req.GetName(), rbdSnap.RbdSnapName)
return nil return nil
} }
@ -265,8 +223,10 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
} }
// For now the image get unconditionally deleted, but here retention policy can be checked // For now the image get unconditionally deleted, but here retention policy can be checked
volumeID := req.GetVolumeId() volumeID := req.GetVolumeId()
if volumeID == "" {
return nil, status.Error(codes.InvalidArgument, "Empty volume ID in request")
}
volumeIDMutex.LockKey(volumeID) volumeIDMutex.LockKey(volumeID)
defer func() { defer func() {
if err := volumeIDMutex.UnlockKey(volumeID); err != nil { if err := volumeIDMutex.UnlockKey(volumeID); err != nil {
klog.Warningf("failed to unlock mutex volume:%s %v", volumeID, err) klog.Warningf("failed to unlock mutex volume:%s %v", volumeID, err)
@ -274,84 +234,66 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}() }()
rbdVol := &rbdVolume{} rbdVol := &rbdVolume{}
if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil { if err := genVolFromVolID(rbdVol, volumeID, req.GetSecrets()); err != nil {
if err, ok := err.(*util.CacheEntryNotFound); ok { // if error is ErrKeyNotFound, then a previous attempt at deletion was complete
klog.V(3).Infof("metadata for volume %s not found, assuming the volume to be already deleted (%v)", volumeID, err) // or partially complete (image and imageOMap are garbage collected already), hence return
// success as deletion is complete
if _, ok := err.(util.ErrKeyNotFound); ok {
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
return nil, err // All errors other than ErrImageNotFound should return an error back to the caller
if _, ok := err.(ErrImageNotFound); !ok {
return nil, status.Error(codes.Internal, err.Error())
} }
volName := rbdVol.VolName // If error is ErrImageNotFound then we failed to find the image, but found the imageOMap
// to lead us to the image, hence the imageOMap needs to be garbage collected, by calling
// unreserve for the same
volumeNameMutex.LockKey(rbdVol.RequestName)
defer func() {
if err := volumeNameMutex.UnlockKey(rbdVol.RequestName); err != nil {
klog.Warningf("failed to unlock mutex volume:%s %v", rbdVol.RequestName, err)
}
}()
if err := unreserveVol(rbdVol, req.GetSecrets()); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.DeleteVolumeResponse{}, nil
}
// lock out parallel create requests against the same volume name as we
// cleanup the image and associated omaps for the same
volumeNameMutex.LockKey(rbdVol.RequestName)
defer func() {
if err := volumeNameMutex.UnlockKey(rbdVol.RequestName); err != nil {
klog.Warningf("failed to unlock mutex volume:%s %v", rbdVol.RequestName, err)
}
}()
// Deleting rbd image // Deleting rbd image
klog.V(4).Infof("deleting volume %s", volName) klog.V(4).Infof("deleting image %s", rbdVol.RbdImageName)
if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil { if err := deleteImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil {
// TODO: can we detect "already deleted" situations here and proceed? klog.Errorf("failed to delete rbd image: %s/%s with error: %v",
klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err) rbdVol.Pool, rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
if err := cs.MetadataStore.Delete(volumeID); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
delete(rbdVolumes, volumeID)
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
// ListVolumes returns a list of volumes stored in memory
func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
var startToken int
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_VOLUMES); err != nil {
klog.Warningf("invalid list volume req: %v", req)
return nil, err
}
//validate starting token if present
if len(req.GetStartingToken()) > 0 {
i, parseErr := strconv.ParseUint(req.StartingToken, 10, 32)
if parseErr != nil {
return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error())
}
//check starting Token is greater than list of rbd volumes
if len(rbdVolumes) < int(i) {
return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error())
}
startToken = int(i)
}
var entries []*csi.ListVolumesResponse_Entry
keys := make([]string, 0)
for k := range rbdVolumes {
keys = append(keys, k)
}
sort.Strings(keys)
for index, k := range keys {
if index < startToken {
continue
}
entries = append(entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: rbdVolumes[k].VolID,
CapacityBytes: rbdVolumes[k].VolSize,
VolumeContext: extractStoredVolOpt(rbdVolumes[k]),
},
})
}
resp := &csi.ListVolumesResponse{
Entries: entries,
}
return resp, nil
}
// ValidateVolumeCapabilities checks whether the volume capabilities requested // ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported. // are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
if req.GetVolumeId() == "" {
return nil, status.Error(codes.InvalidArgument, "Empty volume ID in request")
}
if len(req.VolumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "Empty volume capabilities in request")
}
for _, cap := range req.VolumeCapabilities { for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
@ -368,100 +310,90 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req
// in store // in store
// nolint: gocyclo // nolint: gocyclo
func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
if err := cs.validateSnapshotReq(req); err != nil { if err := cs.validateSnapshotReq(req); err != nil {
return nil, err return nil, err
} }
snapshotNameMutex.LockKey(req.GetName())
snapshotNameMutex.LockKey(req.GetName())
defer func() { defer func() {
if err := snapshotNameMutex.UnlockKey(req.GetName()); err != nil { if err := snapshotNameMutex.UnlockKey(req.GetName()); err != nil {
klog.Warningf("failed to unlock mutex snapshot:%s %v", req.GetName(), err) klog.Warningf("failed to unlock mutex snapshot:%s %v", req.GetName(), err)
} }
}() }()
// Need to check for already existing snapshot name, and if found // Fetch source volume information
// check for the requested source volume id and already allocated source volume id rbdVol := new(rbdVolume)
if exSnap, err := getRBDSnapshotByName(req.GetName()); err == nil { err := genVolFromVolID(rbdVol, req.GetSourceVolumeId(), req.GetSecrets())
if req.SourceVolumeId == exSnap.SourceVolumeID { if err != nil {
if err = storeSnapshotMetadata(exSnap, cs.MetadataStore); err != nil { if _, ok := err.(ErrImageNotFound); ok {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Errorf(codes.NotFound, "Source Volume ID %s not found", req.GetSourceVolumeId())
}
return nil, status.Errorf(codes.Internal, err.Error())
} }
// Check if source volume was created with required image features for snaps
if !hasSnapshotFeature(rbdVol.ImageFeatures) {
return nil, status.Errorf(codes.InvalidArgument, "volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
}
// Create snap volume
rbdSnap := genSnapFromOptions(rbdVol, req.GetParameters())
rbdSnap.RbdImageName = rbdVol.RbdImageName
rbdSnap.SizeBytes = rbdVol.VolSize
rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
rbdSnap.RequestName = req.GetName()
// Need to check for already existing snapshot name, and if found
// check for the requested source volume id and already allocated source volume id
found, err := checkSnapExists(rbdSnap, req.GetSecrets())
if err != nil {
if _, ok := err.(ErrSnapNameConflict); ok {
return nil, status.Error(codes.AlreadyExists, err.Error())
}
return nil, status.Errorf(codes.Internal, err.Error())
}
if found {
return &csi.CreateSnapshotResponse{ return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{ Snapshot: &csi.Snapshot{
SizeBytes: exSnap.SizeBytes, SizeBytes: rbdSnap.SizeBytes,
SnapshotId: exSnap.SnapID, SnapshotId: rbdSnap.SnapID,
SourceVolumeId: exSnap.SourceVolumeID, SourceVolumeId: rbdSnap.SourceVolumeID,
CreationTime: &timestamp.Timestamp{ CreationTime: rbdSnap.CreatedAt,
Seconds: exSnap.CreatedAt,
},
ReadyToUse: true, ReadyToUse: true,
}, },
}, nil }, nil
} }
return nil, status.Errorf(codes.AlreadyExists, "Snapshot with the same name: %s but with different source volume id already exist", req.GetName())
}
rbdSnap, err := getRBDSnapshotOptions(req.GetParameters()) err = reserveSnap(rbdSnap, req.GetSecrets())
if err != nil { if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
defer func() {
// Generating Snapshot Name and Snapshot ID, as according to CSI spec they MUST be different
snapName := req.GetName()
uniqueID := uuid.NewUUID().String()
rbdVolume, err := getRBDVolumeByID(req.GetSourceVolumeId())
if err != nil { if err != nil {
return nil, status.Errorf(codes.NotFound, "Source Volume ID %s cannot found", req.GetSourceVolumeId()) errDefer := unreserveSnap(rbdSnap, req.GetSecrets())
if errDefer != nil {
klog.Warningf("failed undoing reservation of snapshot: %s %v", req.GetName(), errDefer)
} }
if !hasSnapshotFeature(rbdVolume.ImageFeatures) {
return nil, status.Errorf(codes.InvalidArgument, "volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
} }
}()
rbdSnap.VolName = rbdVolume.VolName
rbdSnap.SnapName = snapName
snapshotID := "csi-rbd-" + rbdVolume.VolName + "-snap-" + uniqueID
rbdSnap.SnapID = snapshotID
rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
rbdSnap.SizeBytes = rbdVolume.VolSize
err = cs.doSnapshot(rbdSnap, req.GetSecrets()) err = cs.doSnapshot(rbdSnap, req.GetSecrets())
// if we already have the snapshot, return the snapshot
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, err
}
rbdSnap.CreatedAt = ptypes.TimestampNow().GetSeconds()
rbdSnapshots[snapshotID] = *rbdSnap
if err = storeSnapshotMetadata(rbdSnap, cs.MetadataStore); err != nil {
return nil, status.Error(codes.Internal, err.Error())
} }
return &csi.CreateSnapshotResponse{ return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{ Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.SizeBytes, SizeBytes: rbdSnap.SizeBytes,
SnapshotId: snapshotID, SnapshotId: rbdSnap.SnapID,
SourceVolumeId: req.GetSourceVolumeId(), SourceVolumeId: req.GetSourceVolumeId(),
CreationTime: &timestamp.Timestamp{ CreationTime: rbdSnap.CreatedAt,
Seconds: rbdSnap.CreatedAt,
},
ReadyToUse: true, ReadyToUse: true,
}, },
}, nil }, nil
} }
func storeSnapshotMetadata(rbdSnap *rbdSnapshot, cp util.CachePersister) error {
if err := cp.Create(rbdSnap.SnapID, rbdSnap); err != nil {
klog.Errorf("failed to store metadata for snapshot %s: %v", rbdSnap.SnapID, err)
return err
}
return nil
}
func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest) error { func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
klog.Warningf("invalid create snapshot req: %v", protosanitizer.StripSecrets(req)) klog.Warningf("invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
@ -475,41 +407,53 @@ func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest)
if len(req.SourceVolumeId) == 0 { if len(req.SourceVolumeId) == 0 {
return status.Error(codes.InvalidArgument, "Source Volume ID cannot be empty") return status.Error(codes.InvalidArgument, "Source Volume ID cannot be empty")
} }
return nil return nil
} }
func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]string) error { func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]string) (err error) {
err := createSnapshot(rbdSnap, rbdSnap.AdminID, secret) err = createSnapshot(rbdSnap, rbdSnap.AdminID, secret)
// if we already have the snapshot, return the snapshot // If snap creation fails, even due to snapname already used, fail, next attempt will get a new
// uuid for use as the snap name
if err != nil { if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok { klog.Errorf("failed to create snapshot: %v", err)
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { return status.Error(codes.Internal, err.Error())
if status.ExitStatus() == int(syscall.EEXIST) {
klog.Warningf("Snapshot with the same name: %s, we return this.", rbdSnap.SnapName)
} else {
klog.Warningf("failed to create snapshot: %v", err)
return err
} }
} else { defer func() {
klog.Warningf("failed to create snapshot: %v", err) if err != nil {
return err errDefer := deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if errDefer != nil {
klog.Errorf("failed to delete snapshot: %v", errDefer)
err = fmt.Errorf("snapshot created but failed to delete snapshot due to"+
" other failures: %v", err)
} }
} else { err = status.Error(codes.Internal, err.Error())
klog.Warningf("failed to create snapshot: %v", err)
return err
} }
} else { }()
klog.V(4).Infof("create snapshot %s", rbdSnap.SnapName)
err = protectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
err = protectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if err != nil { if err != nil {
err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret) klog.Errorf("failed to protect snapshot: %v", err)
return status.Error(codes.Internal, err.Error())
}
defer func() {
if err != nil { if err != nil {
return fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err) errDefer := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if errDefer != nil {
klog.Errorf("failed to unprotect snapshot: %v", errDefer)
err = fmt.Errorf("snapshot created but failed to unprotect snapshot due to"+
" other failures: %v", err)
} }
return errors.New("snapshot is created but failed to protect snapshot") err = status.Error(codes.Internal, err.Error())
} }
}()
err = getSnapshotMetadata(rbdSnap, rbdSnap.AdminID, secret)
if err != nil {
klog.Errorf("failed to fetch snapshot metadata: %v", err)
return status.Error(codes.Internal, err.Error())
} }
return nil return nil
} }
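doSnapshot above leans on a named error return plus deferred undo steps: if protecting the snapshot or fetching its metadata fails, the defers delete or unprotect what was created earlier, so a retry starts clean with a fresh uuid. A minimal standalone sketch of that rollback pattern (the step names are hypothetical, not part of this driver):

package main

import "fmt"

func createStep(name string) error  { fmt.Println("create", name); return nil }
func destroyStep(name string) error { fmt.Println("undo", name); return nil }

// doSteps mirrors the rollback style used in doSnapshot: the deferred closure
// inspects the named return value and undoes the earlier step only on failure.
func doSteps() (err error) {
	if err = createStep("first"); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if errDefer := destroyStep("first"); errDefer != nil {
				err = fmt.Errorf("cleanup failed after error %v: %v", err, errDefer)
			}
		}
	}()

	err = createStep("second")
	return err
}

func main() {
	_ = doSteps()
}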
@ -525,8 +469,8 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
if len(snapshotID) == 0 { if len(snapshotID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty")
} }
snapshotIDMutex.LockKey(snapshotID)
snapshotIDMutex.LockKey(snapshotID)
defer func() { defer func() {
if err := snapshotIDMutex.UnlockKey(snapshotID); err != nil { if err := snapshotIDMutex.UnlockKey(snapshotID); err != nil {
klog.Warningf("failed to unlock mutex snapshot:%s %v", snapshotID, err) klog.Warningf("failed to unlock mutex snapshot:%s %v", snapshotID, err)
@ -534,95 +478,41 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
}() }()
rbdSnap := &rbdSnapshot{} rbdSnap := &rbdSnapshot{}
if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil { if err := genSnapFromSnapID(rbdSnap, snapshotID, req.GetSecrets()); err != nil {
if err, ok := err.(*util.CacheEntryNotFound); ok { // Consider missing snap as already deleted, and proceed to remove the omap values
klog.V(3).Infof("metadata for snapshot %s not found, assuming the snapshot to be already deleted (%v)", snapshotID, err) if _, ok := err.(ErrSnapNotFound); !ok {
return nil, status.Error(codes.Internal, err.Error())
}
if err := unreserveSnap(rbdSnap, req.GetSecrets()); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.DeleteSnapshotResponse{}, nil return &csi.DeleteSnapshotResponse{}, nil
} }
return nil, err // lock out parallel create requests against the same snap name as we
// cleanup the image and associated omaps for the same
snapshotNameMutex.LockKey(rbdSnap.RequestName)
defer func() {
if err := snapshotNameMutex.UnlockKey(rbdSnap.RequestName); err != nil {
klog.Warningf("failed to unlock mutex snapshot:%s %v", rbdSnap.RequestName, err)
} }
}()
// Unprotect snapshot // Unprotect snapshot
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets())
if err != nil { if err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) return nil, status.Errorf(codes.FailedPrecondition,
"failed to unprotect snapshot: %s/%s with error: %v",
rbdSnap.Pool, rbdSnap.RbdSnapName, err)
} }
// Deleting snapshot // Deleting snapshot
klog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName) klog.V(4).Infof("deleting Snaphot %s", rbdSnap.RbdSnapName)
if err := deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()); err != nil { if err := deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) return nil, status.Errorf(codes.FailedPrecondition,
"failed to delete snapshot: %s/%s with error: %v",
rbdSnap.Pool, rbdSnap.RbdSnapName, err)
} }
if err := cs.MetadataStore.Delete(snapshotID); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
delete(rbdSnapshots, snapshotID)
return &csi.DeleteSnapshotResponse{}, nil return &csi.DeleteSnapshotResponse{}, nil
} }
// ListSnapshots lists the snapshots in the store
func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS); err != nil {
klog.Warningf("invalid list snapshot req: %v", req)
return nil, err
}
sourceVolumeID := req.GetSourceVolumeId()
// TODO (sngchlko) list with token
// TODO (#94) protect concurrent access to global data structures
// list only a specific snapshot which has snapshot ID
if snapshotID := req.GetSnapshotId(); len(snapshotID) != 0 {
if rbdSnap, ok := rbdSnapshots[snapshotID]; ok {
// if source volume ID also set, check source volume id on the cache.
if len(sourceVolumeID) != 0 && rbdSnap.SourceVolumeID != sourceVolumeID {
return nil, status.Errorf(codes.Unknown, "Requested Source Volume ID %s is different from %s", sourceVolumeID, rbdSnap.SourceVolumeID)
}
return &csi.ListSnapshotsResponse{
Entries: []*csi.ListSnapshotsResponse_Entry{
{
Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.SizeBytes,
SnapshotId: rbdSnap.SnapID,
SourceVolumeId: rbdSnap.SourceVolumeID,
CreationTime: &timestamp.Timestamp{
Seconds: rbdSnap.CreatedAt,
},
ReadyToUse: true,
},
},
},
}, nil
}
return nil, status.Errorf(codes.NotFound, "Snapshot ID %s cannot found", snapshotID)
}
entries := []*csi.ListSnapshotsResponse_Entry{}
for _, rbdSnap := range rbdSnapshots {
// if source volume ID also set, check source volume id on the cache.
if len(sourceVolumeID) != 0 && rbdSnap.SourceVolumeID != sourceVolumeID {
continue
}
entries = append(entries, &csi.ListSnapshotsResponse_Entry{
Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.SizeBytes,
SnapshotId: rbdSnap.SnapID,
SourceVolumeId: rbdSnap.SourceVolumeID,
CreationTime: &timestamp.Timestamp{
Seconds: rbdSnap.CreatedAt,
},
ReadyToUse: true,
},
})
}
return &csi.ListSnapshotsResponse{
Entries: entries,
}, nil
}


@ -1,81 +0,0 @@
package rbd
import (
"testing"
"github.com/ceph/ceph-csi/pkg/util"
)
type testCachePersister struct {
volumes map[string]rbdVolume
snapshots map[string]rbdSnapshot
}
func (t *testCachePersister) Create(identifier string, data interface{}) error {
return nil
}
func (t *testCachePersister) Get(identifier string, data interface{}) error {
return nil
}
func (t *testCachePersister) ForAll(pattern string, destObj interface{}, f util.ForAllFunc) error {
switch pattern {
case "csi-rbd-vol-":
for identifier, vol := range t.volumes {
*destObj.(*rbdVolume) = vol
if err := f(identifier); err != nil {
return err
}
}
case "csi-rbd-(.*)-snap-":
for identifier, snap := range t.snapshots {
*destObj.(*rbdSnapshot) = snap
if err := f(identifier); err != nil {
return err
}
}
}
return nil
}
func (t *testCachePersister) Delete(identifier string) error {
return nil
}
func TestLoadExDataFromMetadataStore(t *testing.T) {
cs := &ControllerServer{
MetadataStore: &testCachePersister{
volumes: map[string]rbdVolume{
"item1": {
VolID: "1",
},
"item2": {
VolID: "2",
},
},
snapshots: map[string]rbdSnapshot{
"item1": {
SnapID: "1",
},
"item2": {
SnapID: "2",
},
},
},
}
if err := cs.LoadExDataFromMetadataStore(); err != nil {
t.Error(err)
}
if rbdVolumes["item1"] == rbdVolumes["item2"] {
t.Error("rbd volume entries contain pointer to same volume")
}
if rbdSnapshots["item1"] == rbdSnapshots["item2"] {
t.Error("rbd snapshot entries contain pointer to same snapshot")
}
}

60 pkg/rbd/errors.go Normal file

@ -0,0 +1,60 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
// ErrImageNotFound is returned when image name is not found in the cluster on the given pool
type ErrImageNotFound struct {
imageName string
err error
}
func (e ErrImageNotFound) Error() string {
return e.err.Error()
}
// ErrSnapNotFound is returned when snap name passed is not found in the list of snapshots for the
// given image
type ErrSnapNotFound struct {
snapName string
err error
}
func (e ErrSnapNotFound) Error() string {
return e.err.Error()
}
// ErrSnapNameConflict is generated when a requested CSI snap name already exists on RBD but with
// different properties, and hence is in conflict with the passed in CSI snapshot name
type ErrSnapNameConflict struct {
requestName string
err error
}
func (e ErrSnapNameConflict) Error() string {
return e.err.Error()
}
// ErrVolNameConflict is generated when a requested CSI volume name already exists on RBD but with
// different properties, and hence is in conflict with the passed in CSI volume name
type ErrVolNameConflict struct {
requestName string
err error
}
func (e ErrVolNameConflict) Error() string {
return e.err.Error()
}
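Callers branch on these typed errors with a plain type assertion instead of matching error strings; the DeleteSnapshot handler above does this with ErrSnapNotFound to treat a missing snapshot as already deleted. A standalone sketch of the same pattern, using a made-up error type and lookup function:

package main

import (
	"errors"
	"fmt"
)

// errNotFound mirrors the shape of the typed errors above: a small struct that
// wraps the underlying error and is detected at call sites via type assertion.
type errNotFound struct {
	name string
	err  error
}

func (e errNotFound) Error() string { return e.err.Error() }

func lookup(name string) error {
	return errNotFound{name, errors.New("not found: " + name)}
}

func main() {
	err := lookup("csi-snap-1234")
	if _, ok := err.(errNotFound); ok {
		fmt.Println("treat as already deleted; only clean up the omap entries")
		return
	}
	fmt.Println("unexpected failure:", err)
}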


@ -24,6 +24,7 @@ import (
"strings" "strings"
"github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -47,14 +48,25 @@ type NodeServer struct {
// path // path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
targetPath := req.GetTargetPath() targetPath := req.GetTargetPath()
targetPathMutex.LockKey(targetPath) if targetPath == "" {
disableInUseChecks := false return nil, status.Error(codes.InvalidArgument, "Empty target path in request")
}
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Empty volume capability in request")
}
if req.GetVolumeId() == "" {
return nil, status.Error(codes.InvalidArgument, "Empty volume ID in request")
}
targetPathMutex.LockKey(targetPath)
defer func() { defer func() {
if err := targetPathMutex.UnlockKey(targetPath); err != nil { if err := targetPathMutex.UnlockKey(targetPath); err != nil {
klog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err) klog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err)
} }
}() }()
disableInUseChecks := false
volName, err := ns.getVolumeName(req) volName, err := ns.getVolumeName(req)
if err != nil { if err != nil {
@ -82,11 +94,11 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
} }
} }
volOptions, err := getRBDVolumeOptions(req.GetVolumeContext(), disableInUseChecks) volOptions, err := genVolFromVolumeOptions(req.GetVolumeContext(), disableInUseChecks)
if err != nil { if err != nil {
return nil, err return nil, err
} }
volOptions.VolName = volName volOptions.RbdImageName = volName
// Mapping RBD image // Mapping RBD image
devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets()) devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets())
if err != nil { if err != nil {
@ -103,22 +115,15 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
} }
func (ns *NodeServer) getVolumeName(req *csi.NodePublishVolumeRequest) (string, error) { func (ns *NodeServer) getVolumeName(req *csi.NodePublishVolumeRequest) (string, error) {
var volName string var vi util.CSIIdentifier
isBlock := req.GetVolumeCapability().GetBlock() != nil
targetPath := req.GetTargetPath() err := vi.DecomposeCSIID(req.GetVolumeId())
if isBlock { if err != nil {
// Get volName from targetPath klog.Errorf("error decoding volume ID (%s) (%s)", err, req.GetVolumeId())
s := strings.Split(targetPath, "/") return "", status.Error(codes.InvalidArgument, err.Error())
volName = s[len(s)-1]
} else {
// Get volName from targetPath
if !strings.HasSuffix(targetPath, "/mount") {
return "", fmt.Errorf("rbd: malformed the value of target path: %s", targetPath)
} }
s := strings.Split(strings.TrimSuffix(targetPath, "/mount"), "/")
volName = s[len(s)-1] return rbdImgNamePrefix + vi.ObjectUUID, nil
}
return volName, nil
} }
func (ns *NodeServer) mountVolume(req *csi.NodePublishVolumeRequest, devicePath string) error { func (ns *NodeServer) mountVolume(req *csi.NodePublishVolumeRequest, devicePath string) error {
@ -187,6 +192,14 @@ func (ns *NodeServer) createTargetPath(targetPath string, isBlock bool) (bool, e
// NodeUnpublishVolume unmounts the volume from the target path // NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
targetPath := req.GetTargetPath() targetPath := req.GetTargetPath()
if targetPath == "" {
return nil, status.Error(codes.InvalidArgument, "Empty target path in request")
}
if req.GetVolumeId() == "" {
return nil, status.Error(codes.InvalidArgument, "Empty volume ID in request")
}
targetPathMutex.LockKey(targetPath) targetPathMutex.LockKey(targetPath)
defer func() { defer func() {


@ -27,10 +27,101 @@ import (
"k8s.io/utils/nsenter" "k8s.io/utils/nsenter"
) )
// PluginFolder defines the location of rbdplugin /*
RADOS omaps usage:
This note details how we preserve the idempotent nature of create requests and retain the relationship
between orchestrator (CO) generated names and plugin generated names for images and snapshots
The implementation uses Ceph RADOS omaps to preserve the relationship between request name and
generated image (or snapshot) name. There are 4 types of omaps in use,
- A "csi.volumes.[csi-id]" (or "csi.volumes"+.+CSIInstanceID), we call this the csiVolsDirectory
- stores keys named using the CO generated names for volume requests
- keys are named "csi.volume."+[CO generated VolName]
- Key value contains the RBD image uuid that is created or will be created, for the CO provided
name
- A "csi.snaps.[csi-id]" (or "csi.snaps"+.+CSIInstanceID), we refer to this as the csiSnapsDirectory
- stores keys named using the CO generated names for snapshot requests
- keys are named "csi.snap."+[CO generated SnapName]
- Key value contains the RBD snapshot uuid that is created or will be created, for the CO
provided name
- A per image omap named "rbd.csi.volume."+[RBD image uuid], we refer to this as the rbdImageOMap
- stores a single key named "csi.volname", that has the value of the CO generated VolName that
this image refers to
- A per snapshot omap named "rbd.csi.snap."+[RBD snapshot uuid], we refer to this as the snapOMap
- stores a key named "csi.snapname", that has the value of the CO generated SnapName that this
snapshot refers to
- also stores another key named "csi.source", that has the value of the image name that is the
source of the snapshot
Creation of omaps:
When a volume create request is received (or a snapshot create; snapshots are not detailed further in
this comment, as the process is similar),
- The csiVolsDirectory is consulted to find if there is already a key with the CO VolName, and if present,
it is used to read its references to reach the RBD image that backs this VolName, to check if the
RBD image can satisfy the requirements for the request
- If, during this check, it is found that some linking information is stale
or missing, the corresponding keys up to the key in the csiVolsDirectory are cleaned up, to start afresh
- If the key with the CO VolName is not found, or was cleaned up, the request is treated as a
new create request, and an rbdImageOMap is created first with a generated uuid, this ensures that we
do not use a uuid that is already in use
- Next, a key with the VolName is created in the csiVolsDirectory, and its value is updated to store the
generated uuid
- This is followed by updating the rbdImageOMap with the VolName in the rbdImageCSIVolNameKey
- Finally, the image is created (or promoted from a snapshot, if content source was provided) using
the uuid and a corresponding image name prefix (rbdImgNamePrefix or rbdSnapNamePrefix)
The entire operation is locked based on VolName hash, to ensure there is only ever a single entity
modifying the related omaps for a given VolName.
This ensures idempotent nature of creates, as the same CO generated VolName would attempt to use
the same RBD image name to serve the request, as the relations are saved in the respective omaps.
Deletion of omaps:
Delete requests would not contain the VolName, hence deletion uses the volume ID, which is encoded
with the image name in it, to find the image and the rbdImageOMap. The rbdImageOMap is read to get
the VolName that this image points to. This VolName can be further used to read and delete the key
from the csiVolsDirectory.
As we trace back and find the VolName, we also take a hash based lock on the VolName before
proceeding with deleting the image and the related omap entries, to ensure there is only ever a
single entity modifying the related omaps for a given VolName.
*/
const ( const (
// volIDVersion is the version number of volume ID encoding scheme
volIDVersion uint16 = 1
rbdDefaultAdminID = "admin" rbdDefaultAdminID = "admin"
rbdDefaultUserID = rbdDefaultAdminID rbdDefaultUserID = rbdDefaultAdminID
// csiConfigFile is the location of the CSI config file
csiConfigFile = "/etc/ceph-csi-config/config.json"
// CSI volume-name keyname prefix, for key in csiVolsDirectory, suffix is the CSI passed volume name
csiVolNameKeyPrefix = "csi.volume."
// Per RBD image object map name prefix, suffix is the RBD image uuid
rbdImageOMapPrefix = "csi.volume."
// CSI volume-name key in per RBD image object map, containing CSI volume-name for which the
// image was created
rbdImageCSIVolNameKey = "csi.volname"
// RBD image name prefix, suffix is a uuid generated per image
rbdImgNamePrefix = "csi-vol-"
// CSI snap-name keyname prefix, for key in csiSnapsDirectory, suffix is the CSI passed snapshot name
csiSnapNameKeyPrefix = "csi.snap."
// Per RBD snapshot object map name prefix, suffix is the RBD snapshot uuid
rbdSnapOMapPrefix = "csi.snap."
// CSI snap-name key in per RBD snapshot object map, containing CSI snapshot-name for which the
// snapshot was created
rbdSnapCSISnapNameKey = "csi.snapname"
// source image name key in per RBD snapshot object map, containing RBD source image name for
// which the snapshot was created
rbdSnapSourceImageKey = "csi.source"
// RBD snapshot name prefix, suffix is a uuid generated per snapshot
rbdSnapNamePrefix = "csi-snap-"
) )
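Putting the design note and these constants together, the on-cluster names for a single provisioned volume work out as sketched below; the uuid and request name are hypothetical, and CSIInstanceID is assumed to be left at its "default" value:

package main

import "fmt"

func main() {
	// Hypothetical values, for illustration only.
	instanceID := "default"
	reqName := "pvc-9a8b7c6d"                      // CO generated VolName
	uuid := "36cc3f9a-9f58-11e9-94e8-0242ac110003" // uuid generated for the image

	csiVolsDir := "csi.volumes" + "." + instanceID // omap object holding one key per request name
	volNameKey := "csi.volume." + reqName          // key in csiVolsDir, value is the uuid
	imageOMap := "csi.volume." + uuid              // per-image omap; key "csi.volname" holds reqName
	imageName := "csi-vol-" + uuid                 // the RBD image that is actually created

	fmt.Println(csiVolsDir, volNameKey, imageOMap, imageName)
}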
// PluginFolder defines the location of ceph plugin // PluginFolder defines the location of ceph plugin
@ -47,8 +138,14 @@ type Driver struct {
var ( var (
version = "1.0.0" version = "1.0.0"
// confStore is the global config store // CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing
confStore *util.ConfigStore // ceph clusters across CSI instances, to differentiate omap names per CSI instance
CSIInstanceID = "default"
// csiVolsDirectory is the name of the CSI volumes object map that contains CSI volume-name
// based keys
csiVolsDirectory = "csi.volumes"
// csiSnapsDirectory is the name of the CSI snapshots object map that contains CSI snapshot-name based keys
csiSnapsDirectory = "csi.snaps"
) )
// NewDriver returns new rbd driver // NewDriver returns new rbd driver
@ -64,10 +161,9 @@ func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
} }
// NewControllerServer initialize a controller server for rbd CSI driver // NewControllerServer initialize a controller server for rbd CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer { func NewControllerServer(d *csicommon.CSIDriver) *ControllerServer {
return &ControllerServer{ return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d), DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
MetadataStore: cachePersister,
} }
} }
@ -89,16 +185,23 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err
// Run start a non-blocking grpc controller,node and identityserver for // Run start a non-blocking grpc controller,node and identityserver for
// rbd CSI driver which can serve multiple parallel requests // rbd CSI driver which can serve multiple parallel requests
func (r *Driver) Run(driverName, nodeID, endpoint, configRoot string, containerized bool, cachePersister util.CachePersister) { func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containerized bool) {
var err error var err error
klog.Infof("Driver: %v version: %v", driverName, version) klog.Infof("Driver: %v version: %v", driverName, version)
// Initialize config store // Create ceph.conf for use with CLI commands
confStore, err = util.NewConfigStore(configRoot) if err = util.WriteCephConfig(); err != nil {
if err != nil { klog.Fatalf("failed to write ceph configuration file (%v)", err)
klog.Fatalln("Failed to initialize config store.")
} }
// Use passed in instance ID, if provided for omap suffix naming
if instanceID != "" {
CSIInstanceID = instanceID
}
csiVolsDirectory = csiVolsDirectory + "." + CSIInstanceID
csiSnapsDirectory = csiSnapsDirectory + "." + CSIInstanceID
// Initialize default library driver // Initialize default library driver
r.cd = csicommon.NewCSIDriver(driverName, version, nodeID) r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
if r.cd == nil { if r.cd == nil {
@ -106,9 +209,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint, configRoot string, containeri
} }
r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{ r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME, csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
}) })
@ -127,11 +228,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint, configRoot string, containeri
klog.Fatalf("failed to start node server, err %v\n", err) klog.Fatalf("failed to start node server, err %v\n", err)
} }
r.cs = NewControllerServer(r.cd, cachePersister) r.cs = NewControllerServer(r.cd)
if err = r.cs.LoadExDataFromMetadataStore(); err != nil {
klog.Fatalf("failed to load metadata from store, err %v\n", err)
}
s := csicommon.NewNonBlockingGRPCServer() s := csicommon.NewNonBlockingGRPCServer()
s.Start(endpoint, r.ids, r.cs, r.ns) s.Start(endpoint, r.ids, r.cs, r.ns)


@ -227,7 +227,7 @@ func checkRbdNbdTools() bool {
func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string]string) (string, error) { func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string]string) (string, error) {
var err error var err error
image := volOptions.VolName image := volOptions.RbdImageName
imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image) imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image)
useNBD := false useNBD := false
@ -271,16 +271,11 @@ func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string
} }
func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (string, error) { func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (string, error) {
image := volOpt.VolName image := volOpt.RbdImageName
imagePath := fmt.Sprintf("%s/%s", volOpt.Pool, image) imagePath := fmt.Sprintf("%s/%s", volOpt.Pool, image)
mon, err := getMon(volOpt, creds) klog.V(5).Infof("rbd: map mon %s", volOpt.Monitors)
if err != nil { key, err := getKey(userID, creds)
return "", err
}
klog.V(5).Infof("rbd: map mon %s", mon)
key, err := getRBDKey(volOpt.ClusterID, userID, creds)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -293,7 +288,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri
} }
output, err := execCommand(cmdName, []string{ output, err := execCommand(cmdName, []string{
"map", imagePath, "--id", userID, "-m", mon, "--key=" + key}) "map", imagePath, "--id", userID, "-m", volOpt.Monitors, "--key=" + key})
if err != nil { if err != nil {
klog.Warningf("rbd: map error %v, rbd output: %s", err, string(output)) klog.Warningf("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
@ -306,7 +301,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri
} }
func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, credentials map[string]string) error { func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, credentials map[string]string) error {
image := volOptions.VolName image := volOptions.RbdImageName
imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image) imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image)
err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := wait.ExponentialBackoff(backoff, func() (bool, error) {


@ -17,11 +17,16 @@ limitations under the License.
package rbd package rbd
import ( import (
"encoding/json"
"fmt" "fmt"
"os/exec" "os/exec"
"strings" "strings"
"time" "time"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/pkg/errors" "github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog" "k8s.io/klog"
@ -39,35 +44,45 @@ const (
rbdDefaultMounter = "rbd" rbdDefaultMounter = "rbd"
) )
// rbdVolume represents a CSI volume and its RBD image specifics
type rbdVolume struct { type rbdVolume struct {
VolName string `json:"volName"` // RbdImageName is the name of the RBD image backing this rbdVolume
VolID string `json:"volID"` // VolID is the volume ID that is exchanged with CSI drivers, identifying this rbdVol
Monitors string `json:"monitors"` // RequestName is the CSI generated volume name for the rbdVolume
MonValueFromSecret string `json:"monValueFromSecret"` RbdImageName string
Pool string `json:"pool"` VolID string
ImageFormat string `json:"imageFormat"` Monitors string
ImageFeatures string `json:"imageFeatures"` Pool string
VolSize int64 `json:"volSize"` ImageFormat string
AdminID string `json:"adminId"` ImageFeatures string
UserID string `json:"userId"` VolSize int64
Mounter string `json:"mounter"` AdminID string
DisableInUseChecks bool `json:"disableInUseChecks"` UserID string
ClusterID string `json:"clusterId"` Mounter string
DisableInUseChecks bool
ClusterID string
RequestName string
} }
// rbdSnapshot represents a CSI snapshot and its RBD snapshot specifics
type rbdSnapshot struct { type rbdSnapshot struct {
SourceVolumeID string `json:"sourceVolumeID"` // SourceVolumeID is the volume ID of RbdImageName, that is exchanged with CSI drivers
VolName string `json:"volName"` // RbdImageName is the name of the RBD image, that is this rbdSnapshot's source image
SnapName string `json:"snapName"` // RbdSnapName is the name of the RBD snapshot backing this rbdSnapshot
SnapID string `json:"sanpID"` // SnapID is the snapshot ID that is exchanged with CSI drivers, identifying this rbdSnapshot
Monitors string `json:"monitors"` // RequestName is the CSI generated snapshot name for the rbdSnapshot
MonValueFromSecret string `json:"monValueFromSecret"` SourceVolumeID string
Pool string `json:"pool"` RbdImageName string
CreatedAt int64 `json:"createdAt"` RbdSnapName string
SizeBytes int64 `json:"sizeBytes"` SnapID string
AdminID string `json:"adminId"` Monitors string
UserID string `json:"userId"` Pool string
ClusterID string `json:"clusterId"` CreatedAt *timestamp.Timestamp
SizeBytes int64
AdminID string
UserID string
ClusterID string
RequestName string
} }
var ( var (
@ -87,67 +102,36 @@ var (
supportedFeatures = sets.NewString("layering") supportedFeatures = sets.NewString("layering")
) )
func getRBDKey(clusterid, id string, credentials map[string]string) (string, error) { func getKey(id string, credentials map[string]string) (string, error) {
var ( var (
ok bool
err error
key string key string
ok bool
) )
if key, ok = credentials[id]; !ok { if key, ok = credentials[id]; !ok {
if clusterid != "" {
key, err = confStore.KeyForUser(clusterid, id)
if err != nil {
return "", fmt.Errorf("RBD key for ID: %s not found in config store of clusterID (%s)", id, clusterid)
}
} else {
return "", fmt.Errorf("RBD key for ID: %s not found", id) return "", fmt.Errorf("RBD key for ID: %s not found", id)
} }
}
return key, nil return key, nil
} }
func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) { // createImage creates a new ceph image with provision and volume options.
mon := pOpts.Monitors func createImage(pOpts *rbdVolume, volSz int64, adminID string, credentials map[string]string) error {
if len(mon) == 0 {
// if mons are set in secret, retrieve them
if len(pOpts.MonValueFromSecret) == 0 {
// yet another sanity check
return "", errors.New("either monitors or monValueFromSecret must be set")
}
val, ok := credentials[pOpts.MonValueFromSecret]
if !ok {
return "", fmt.Errorf("mon data %s is not set in secret", pOpts.MonValueFromSecret)
}
mon = val
}
return mon, nil
}
// CreateImage creates a new ceph image with provision and volume options.
func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map[string]string) error {
var output []byte var output []byte
mon, err := getMon(pOpts, credentials) image := pOpts.RbdImageName
if err != nil {
return err
}
image := pOpts.VolName
volSzMiB := fmt.Sprintf("%dM", volSz) volSzMiB := fmt.Sprintf("%dM", volSz)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) key, err := getKey(adminID, credentials)
if err != nil { if err != nil {
return err return err
} }
if pOpts.ImageFormat == rbdImageFormat2 { if pOpts.ImageFormat == rbdImageFormat2 {
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzMiB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool) klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzMiB, pOpts.ImageFormat, pOpts.ImageFeatures, pOpts.Monitors, pOpts.Pool)
} else { } else {
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzMiB, pOpts.ImageFormat, mon, pOpts.Pool) klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzMiB, pOpts.ImageFormat, pOpts.Monitors, pOpts.Pool)
} }
args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat} args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", adminID, "-m", pOpts.Monitors, "--key=" + key, "--image-format", pOpts.ImageFormat}
if pOpts.ImageFormat == rbdImageFormat2 { if pOpts.ImageFormat == rbdImageFormat2 {
args = append(args, "--image-feature", pOpts.ImageFeatures) args = append(args, "--image-feature", pOpts.ImageFeatures)
} }
@ -166,21 +150,16 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (
var output string var output string
var cmd []byte var cmd []byte
image := pOpts.VolName image := pOpts.RbdImageName
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
key, err := getRBDKey(pOpts.ClusterID, userID, credentials) key, err := getKey(userID, credentials)
if err != nil { if err != nil {
return false, "", err return false, "", err
} }
mon, err := getMon(pOpts, credentials) klog.V(4).Infof("rbd: status %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
if err != nil { args := []string{"status", image, "--pool", pOpts.Pool, "-m", pOpts.Monitors, "--id", userID, "--key=" + key}
return false, "", err
}
klog.V(4).Infof("rbd: status %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"status", image, "--pool", pOpts.Pool, "-m", mon, "--id", userID, "--key=" + key}
cmd, err = execCommand("rbd", args) cmd, err = execCommand("rbd", args)
output = string(cmd) output = string(cmd)
@ -205,10 +184,11 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (
return false, output, nil return false, output, nil
} }
// DeleteImage deletes a ceph image with provision and volume options. // deleteImage deletes a ceph image with provision and volume options.
func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]string) error { func deleteImage(pOpts *rbdVolume, adminID string, credentials map[string]string) error {
var output []byte var output []byte
image := pOpts.VolName
image := pOpts.RbdImageName
found, _, err := rbdStatus(pOpts, adminID, credentials) found, _, err := rbdStatus(pOpts, adminID, credentials)
if err != nil { if err != nil {
return err return err
@ -217,22 +197,190 @@ func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]str
klog.Info("rbd is still being used ", image) klog.Info("rbd is still being used ", image)
return fmt.Errorf("rbd %s is still being used", image) return fmt.Errorf("rbd %s is still being used", image)
} }
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) key, err := getKey(adminID, credentials)
if err != nil {
return err
}
mon, err := getMon(pOpts, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: rm %s using mon %s, pool %s", image, mon, pOpts.Pool) klog.V(4).Infof("rbd: rm %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key} args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminID, "-m", pOpts.Monitors,
"--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
if err == nil { if err != nil {
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return err
}
err = unreserveVol(pOpts, credentials)
if err != nil {
klog.Errorf("failed to remove reservation for volume (%s) with backing image (%s) (%s)",
pOpts.RequestName, pOpts.RbdImageName, err)
err = nil
}
return err
}
// updateSnapWithImageInfo updates provided rbdSnapshot with information from on-disk data
// regarding the same
func updateSnapWithImageInfo(rbdSnap *rbdSnapshot, credentials map[string]string) error {
key, err := getKey(rbdSnap.AdminID, credentials)
if err != nil {
return err
}
snapInfo, err := getSnapInfo(rbdSnap.Monitors, rbdSnap.AdminID, key,
rbdSnap.Pool, rbdSnap.RbdImageName, rbdSnap.RbdSnapName)
if err != nil {
return err
}
rbdSnap.SizeBytes = snapInfo.Size
tm, err := time.Parse(time.ANSIC, snapInfo.Timestamp)
if err != nil {
return err
}
rbdSnap.CreatedAt, err = ptypes.TimestampProto(tm)
return err
}
// updateVolWithImageInfo updates provided rbdVolume with information from on-disk data
// regarding the same
func updateVolWithImageInfo(rbdVol *rbdVolume, credentials map[string]string) error {
key, err := getKey(rbdVol.AdminID, credentials)
if err != nil {
return err
}
imageInfo, err := getImageInfo(rbdVol.Monitors, rbdVol.AdminID, key,
rbdVol.Pool, rbdVol.RbdImageName)
if err != nil {
return err
}
if imageInfo.Format != 2 {
return fmt.Errorf("unknown or unsupported image format (%d) returned for image (%s)",
imageInfo.Format, rbdVol.RbdImageName)
}
rbdVol.ImageFormat = rbdImageFormat2
rbdVol.VolSize = imageInfo.Size
rbdVol.ImageFeatures = strings.Join(imageInfo.Features, ",")
return nil return nil
} }
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
// genSnapFromSnapID generates a rbdSnapshot structure from the provided identifier, updating
// the structure with elements from on-disk snapshot metadata as well
func genSnapFromSnapID(rbdSnap *rbdSnapshot, snapshotID string, credentials map[string]string) error {
var (
options map[string]string
vi util.CSIIdentifier
)
options = make(map[string]string)
rbdSnap.SnapID = snapshotID
err := vi.DecomposeCSIID(rbdSnap.SnapID)
if err != nil {
klog.Errorf("error decoding snapshot ID (%s) (%s)", err, rbdSnap.SnapID)
return err
}
rbdSnap.ClusterID = vi.ClusterID
options["clusterID"] = rbdSnap.ClusterID
rbdSnap.RbdSnapName = rbdSnapNamePrefix + vi.ObjectUUID
rbdSnap.Monitors, _, err = getMonsAndClusterID(options)
if err != nil {
return err
}
rbdSnap.AdminID, rbdSnap.UserID = getIDs(options)
key, err := getKey(rbdSnap.AdminID, credentials)
if err != nil {
return err
}
rbdSnap.Pool, err = util.GetPoolName(rbdSnap.Monitors, rbdSnap.AdminID, key, vi.PoolID)
if err != nil {
return err
}
// TODO: fetch all omap vals in one call, rather than making multiple listomapvals calls
snapUUID := strings.TrimPrefix(rbdSnap.RbdSnapName, rbdSnapNamePrefix)
rbdSnap.RequestName, err = util.GetOMapValue(rbdSnap.Monitors, rbdSnap.AdminID,
key, rbdSnap.Pool, rbdSnapOMapPrefix+snapUUID, rbdSnapCSISnapNameKey)
if err != nil {
return err
}
rbdSnap.RbdImageName, err = util.GetOMapValue(rbdSnap.Monitors, rbdSnap.AdminID,
key, rbdSnap.Pool, rbdSnapOMapPrefix+snapUUID, rbdSnapSourceImageKey)
if err != nil {
return err
}
err = updateSnapWithImageInfo(rbdSnap, credentials)
return err
}
// genVolFromVolID generates a rbdVolume structure from the provided identifier, updating
// the structure with elements from on-disk image metadata as well
func genVolFromVolID(rbdVol *rbdVolume, volumeID string, credentials map[string]string) error {
var (
options map[string]string
vi util.CSIIdentifier
)
options = make(map[string]string)
// rbdVolume fields that are not filled up in this function are:
// Mounter, MultiNodeWritable
rbdVol.VolID = volumeID
err := vi.DecomposeCSIID(rbdVol.VolID)
if err != nil {
klog.V(4).Infof("error decoding volume ID (%s) (%s)", err, rbdVol.VolID)
return err
}
rbdVol.ClusterID = vi.ClusterID
options["clusterID"] = rbdVol.ClusterID
rbdVol.RbdImageName = rbdImgNamePrefix + vi.ObjectUUID
rbdVol.Monitors, _, err = getMonsAndClusterID(options)
if err != nil {
return err
}
rbdVol.AdminID, rbdVol.UserID = getIDs(options)
key, err := getKey(rbdVol.AdminID, credentials)
if err != nil {
return err
}
rbdVol.Pool, err = util.GetPoolName(rbdVol.Monitors, rbdVol.AdminID, key,
vi.PoolID)
if err != nil {
return err
}
imageUUID := strings.TrimPrefix(rbdVol.RbdImageName, rbdImgNamePrefix)
rbdVol.RequestName, err = util.GetOMapValue(rbdVol.Monitors, rbdVol.AdminID,
key, rbdVol.Pool, rbdImageOMapPrefix+imageUUID, rbdImageCSIVolNameKey)
if err != nil {
return err
}
err = updateVolWithImageInfo(rbdVol, credentials)
return err return err
} }
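Tying this back to the "Deletion of omaps" note earlier: a delete-style caller only holds the volume ID, so it first rebuilds the rbdVolume from the ID and the omaps, then removes the image. The fragment below is a hedged sketch by analogy with the DeleteSnapshot handler shown above, not the actual DeleteVolume handler; it assumes the surrounding package context and variables named volumeID and secrets:

	// Recover the full rbdVolume (monitors, pool, image name, request name) from the ID.
	rbdVol := &rbdVolume{}
	if err := genVolFromVolID(rbdVol, volumeID, secrets); err != nil {
		return err
	}
	// deleteImage above also calls unreserveVol, which drops the csiVolsDirectory key
	// and the per-image omap once the image itself has been removed.
	if err := deleteImage(rbdVol, rbdVol.AdminID, secrets); err != nil {
		return err
	}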
@ -242,41 +390,29 @@ func execCommand(command string, args []string) ([]byte, error) {
return cmd.CombinedOutput() return cmd.CombinedOutput()
} }
func getMonsAndClusterID(options map[string]string) (monitors, clusterID, monInSecret string, err error) { func getMonsAndClusterID(options map[string]string) (monitors, clusterID string, err error) {
var ok bool var ok bool
monitors, ok = options["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if monInSecret, ok = options["monValueFromSecret"]; !ok {
// if mons are not in secret, check if we have a cluster-id
if clusterID, ok = options["clusterID"]; !ok { if clusterID, ok = options["clusterID"]; !ok {
err = errors.New("either monitors or monValueFromSecret or clusterID must be set") err = errors.New("clusterID must be set")
return return
} }
if monitors, err = confStore.Mons(clusterID); err != nil { if monitors, err = util.Mons(csiConfigFile, clusterID); err != nil {
klog.Errorf("failed getting mons (%s)", err) klog.Errorf("failed getting mons (%s)", err)
err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", clusterID) err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", clusterID)
return return
} }
}
}
return return
} }
func getIDs(options map[string]string, clusterID string) (adminID, userID string, err error) { func getIDs(options map[string]string) (adminID, userID string) {
var ok bool var ok bool
adminID, ok = options["adminid"] adminID, ok = options["adminid"]
switch { switch {
case ok: case ok:
case clusterID != "":
if adminID, err = confStore.AdminID(clusterID); err != nil {
klog.Errorf("failed getting adminID (%s)", err)
return "", "", fmt.Errorf("failed to fetch adminID for clusterID (%s)", clusterID)
}
default: default:
adminID = rbdDefaultAdminID adminID = rbdDefaultAdminID
} }
@ -284,19 +420,14 @@ func getIDs(options map[string]string, clusterID string) (adminID, userID string
userID, ok = options["userid"] userID, ok = options["userid"]
switch { switch {
case ok: case ok:
case clusterID != "":
if userID, err = confStore.UserID(clusterID); err != nil {
klog.Errorf("failed getting userID (%s)", err)
return "", "", fmt.Errorf("failed to fetch userID using clusterID (%s)", clusterID)
}
default: default:
userID = rbdDefaultUserID userID = rbdDefaultUserID
} }
return adminID, userID, err return adminID, userID
} }
func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) { func genVolFromVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) {
var ( var (
ok bool ok bool
err error err error
@ -308,7 +439,7 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool)
return nil, errors.New("missing required parameter pool") return nil, errors.New("missing required parameter pool")
} }
rbdVol.Monitors, rbdVol.ClusterID, rbdVol.MonValueFromSecret, err = getMonsAndClusterID(volOptions) rbdVol.Monitors, rbdVol.ClusterID, err = getMonsAndClusterID(volOptions)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -326,7 +457,8 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool)
arr := strings.Split(imageFeatures, ",") arr := strings.Split(imageFeatures, ",")
for _, f := range arr { for _, f := range arr {
if !supportedFeatures.Has(f) { if !supportedFeatures.Has(f) {
return nil, fmt.Errorf("invalid feature %q for volume csi-rbdplugin, supported features are: %v", f, supportedFeatures) return nil, fmt.Errorf("invalid feature %q for volume csi-rbdplugin, supported"+
" features are: %v", f, supportedFeatures)
} }
} }
rbdVol.ImageFeatures = imageFeatures rbdVol.ImageFeatures = imageFeatures
@ -337,55 +469,43 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool)
klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks) klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks)
rbdVol.DisableInUseChecks = disableInUseChecks rbdVol.DisableInUseChecks = disableInUseChecks
err = getCredsFromVol(rbdVol, volOptions) getCredsFromVol(rbdVol, volOptions)
if err != nil {
return nil, err
}
return rbdVol, nil return rbdVol, nil
} }
func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error { func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) {
var ( var ok bool
ok bool
err error
)
rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.ClusterID) rbdVol.AdminID, rbdVol.UserID = getIDs(volOptions)
if err != nil {
return err
}
rbdVol.Mounter, ok = volOptions["mounter"] rbdVol.Mounter, ok = volOptions["mounter"]
if !ok { if !ok {
rbdVol.Mounter = rbdDefaultMounter rbdVol.Mounter = rbdDefaultMounter
} }
return err
} }
func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) { func genSnapFromOptions(rbdVol *rbdVolume, snapOptions map[string]string) *rbdSnapshot {
var ( var (
ok bool
err error err error
ok bool
) )
rbdSnap := &rbdSnapshot{} rbdSnap := &rbdSnapshot{}
rbdSnap.Pool, ok = snapOptions["pool"] rbdSnap.Pool, ok = snapOptions["pool"]
if !ok { if !ok {
return nil, errors.New("missing required parameter pool") rbdSnap.Pool = rbdVol.Pool
} }
rbdSnap.Monitors, rbdSnap.ClusterID, rbdSnap.MonValueFromSecret, err = getMonsAndClusterID(snapOptions) rbdSnap.Monitors, rbdSnap.ClusterID, err = getMonsAndClusterID(snapOptions)
if err != nil { if err != nil {
return nil, err rbdSnap.Monitors = rbdVol.Monitors
rbdSnap.ClusterID = rbdVol.ClusterID
} }
rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.ClusterID) rbdSnap.AdminID, rbdSnap.UserID = getIDs(snapOptions)
if err != nil {
return nil, err return rbdSnap
}
return rbdSnap, nil
} }
func hasSnapshotFeature(imageFeatures string) bool { func hasSnapshotFeature(imageFeatures string) bool {
@ -398,67 +518,20 @@ func hasSnapshotFeature(imageFeatures string) bool {
return false return false
} }
func getRBDVolumeByID(volumeID string) (*rbdVolume, error) {
if rbdVol, ok := rbdVolumes[volumeID]; ok {
return &rbdVol, nil
}
return nil, fmt.Errorf("volume id %s does not exit in the volumes list", volumeID)
}
func getRBDVolumeByName(volName string) (*rbdVolume, error) {
for _, rbdVol := range rbdVolumes {
if rbdVol.VolName == volName {
v := rbdVol
return &v, nil
}
}
return nil, fmt.Errorf("volume name %s does not exit in the volumes list", volName)
}
func getRBDSnapshotByName(snapName string) (*rbdSnapshot, error) {
for _, rbdSnap := range rbdSnapshots {
if rbdSnap.SnapName == snapName {
s := rbdSnap
return &s, nil
}
}
return nil, fmt.Errorf("snapshot name %s does not exit in the snapshots list", snapName)
}
func getSnapMon(pOpts *rbdSnapshot, credentials map[string]string) (string, error) {
mon := pOpts.Monitors
if len(mon) == 0 {
// if mons are set in secret, retrieve them
if len(pOpts.MonValueFromSecret) == 0 {
// yet another sanity check
return "", errors.New("either monitors or monValueFromSecret must be set")
}
val, ok := credentials[pOpts.MonValueFromSecret]
if !ok {
return "", fmt.Errorf("mon data %s is not set in secret", pOpts.MonValueFromSecret)
}
mon = val
}
return mon, nil
}
func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte var output []byte
image := pOpts.VolName image := pOpts.RbdImageName
snapID := pOpts.SnapID snapName := pOpts.RbdSnapName
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) key, err := getKey(adminID, credentials)
if err != nil {
return err
}
mon, err := getSnapMon(pOpts, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s ", image, mon, pOpts.Pool) klog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s ", image, pOpts.Monitors, pOpts.Pool)
args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key} args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
adminID, "-m", pOpts.Monitors, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
@ -469,54 +542,19 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]
return nil return nil
} }
func extractStoredVolOpt(r rbdVolume) map[string]string {
volOptions := make(map[string]string)
volOptions["pool"] = r.Pool
if len(r.Monitors) > 0 {
volOptions["monitors"] = r.Monitors
}
if len(r.MonValueFromSecret) > 0 {
volOptions["monValueFromSecret"] = r.MonValueFromSecret
}
volOptions["imageFormat"] = r.ImageFormat
if len(r.ImageFeatures) > 0 {
volOptions["imageFeatures"] = r.ImageFeatures
}
if len(r.AdminID) > 0 {
volOptions["adminId"] = r.AdminID
}
if len(r.UserID) > 0 {
volOptions["userId"] = r.UserID
}
if len(r.Mounter) > 0 {
volOptions["mounter"] = r.Mounter
}
return volOptions
}
func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte var output []byte
mon, err := getSnapMon(pOpts, credentials) image := pOpts.RbdImageName
snapName := pOpts.RbdSnapName
key, err := getKey(adminID, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: snap create %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
image := pOpts.VolName args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapName, image,
snapID := pOpts.SnapID "--id", adminID, "-m", pOpts.Monitors, "--key=" + key}
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
klog.V(4).Infof("rbd: snap create %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
@ -530,20 +568,16 @@ func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte var output []byte
mon, err := getSnapMon(pOpts, credentials) image := pOpts.RbdImageName
snapName := pOpts.RbdSnapName
key, err := getKey(adminID, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
image := pOpts.VolName args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
snapID := pOpts.SnapID adminID, "-m", pOpts.Monitors, "--key=" + key}
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
klog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
@ -557,20 +591,16 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[strin
func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte var output []byte
mon, err := getSnapMon(pOpts, credentials) image := pOpts.RbdImageName
snapName := pOpts.RbdSnapName
key, err := getKey(adminID, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
image := pOpts.VolName args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
snapID := pOpts.SnapID adminID, "-m", pOpts.Monitors, "--key=" + key}
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
klog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
@ -578,26 +608,27 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
return errors.Wrapf(err, "failed to delete snapshot, command output: %s", string(output)) return errors.Wrapf(err, "failed to delete snapshot, command output: %s", string(output))
} }
if err := unreserveSnap(pOpts, credentials); err != nil {
klog.Errorf("failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
pOpts.RequestName, pOpts.RbdSnapName, pOpts.RbdImageName, err)
}
return nil return nil
} }
func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string, credentials map[string]string) error { func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte var output []byte
mon, err := getMon(pVolOpts, credentials) image := pVolOpts.RbdImageName
snapName := pSnapOpts.RbdSnapName
key, err := getKey(adminID, credentials)
if err != nil { if err != nil {
return err return err
} }
klog.V(4).Infof("rbd: clone %s using mon %s, pool %s", image, pVolOpts.Monitors, pVolOpts.Pool)
image := pVolOpts.VolName args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.RbdImageName + "@" + snapName,
snapID := pSnapOpts.SnapID pVolOpts.Pool + "/" + image, "--id", adminID, "-m", pVolOpts.Monitors, "--key=" + key}
key, err := getRBDKey(pVolOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
klog.V(4).Infof("rbd: clone %s using mon %s, pool %s", image, mon, pVolOpts.Pool)
args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.VolName + "@" + snapID, pVolOpts.Pool + "/" + image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
@ -607,3 +638,135 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string
return nil return nil
} }
// getSnapshotMetadata fetches on-disk metadata about the snapshot and populates the passed in
// rbdSnapshot structure
func getSnapshotMetadata(pSnapOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
imageName := pSnapOpts.RbdImageName
snapName := pSnapOpts.RbdSnapName
key, err := getKey(adminID, credentials)
if err != nil {
return err
}
snapInfo, err := getSnapInfo(pSnapOpts.Monitors, adminID, key, pSnapOpts.Pool, imageName, snapName)
if err != nil {
return err
}
pSnapOpts.SizeBytes = snapInfo.Size
tm, err := time.Parse(time.ANSIC, snapInfo.Timestamp)
if err != nil {
return err
}
pSnapOpts.CreatedAt, err = ptypes.TimestampProto(tm)
if err != nil {
return err
}
return nil
}
// imageInfo strongly typed JSON spec for image info
type imageInfo struct {
ObjectUUID string `json:"name"`
Size int64 `json:"size"`
Format int64 `json:"format"`
Features []string `json:"features"`
CreatedAt string `json:"create_timestamp"`
}
// getImageInfo queries rbd about the given image and returns its metadata, returning
// ErrImageNotFound if the provided image is not found
func getImageInfo(monitors, adminID, key, poolName, imageName string) (imageInfo, error) {
// rbd --format=json info [image-spec | snap-spec]
var imgInfo imageInfo
stdout, _, err := util.ExecCommand(
"rbd",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", util.CephConfigPath,
"--format="+"json",
"info", poolName+"/"+imageName)
if err != nil {
klog.Errorf("failed getting information for image (%s): (%s)", poolName+"/"+imageName, err)
if strings.Contains(string(stdout), "rbd: error opening image "+imageName+
": (2) No such file or directory") {
return imgInfo, ErrImageNotFound{imageName, err}
}
return imgInfo, err
}
err = json.Unmarshal(stdout, &imgInfo)
if err != nil {
klog.Errorf("failed to parse JSON output of image info (%s): (%s)",
poolName+"/"+imageName, err)
return imgInfo, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s",
err, string(stdout))
}
return imgInfo, nil
}
// snapInfo strongly typed JSON spec for snap ls rbd output
type snapInfo struct {
ID int64 `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
Timestamp string `json:"timestamp"`
}
/*
getSnapInfo queries rbd about the snapshots of the given image and returns its metadata. It
returns ErrImageNotFound if the provided image is not found, and ErrSnapNotFound if the provided
snap is not found in the image's snapshot list
*/
func getSnapInfo(monitors, adminID, key, poolName, imageName, snapName string) (snapInfo, error) {
// rbd --format=json snap ls [image-spec]
var (
snpInfo snapInfo
snaps []snapInfo
)
stdout, _, err := util.ExecCommand(
"rbd",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", util.CephConfigPath,
"--format="+"json",
"snap", "ls", poolName+"/"+imageName)
if err != nil {
klog.Errorf("failed getting snap (%s) information from image (%s): (%s)",
snapName, poolName+"/"+imageName, err)
if strings.Contains(string(stdout), "rbd: error opening image "+imageName+
": (2) No such file or directory") {
return snpInfo, ErrImageNotFound{imageName, err}
}
return snpInfo, err
}
err = json.Unmarshal(stdout, &snaps)
if err != nil {
klog.Errorf("failed to parse JSON output of image snap list (%s): (%s)",
poolName+"/"+imageName, err)
return snpInfo, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s",
err, string(stdout))
}
for _, snap := range snaps {
if snap.Name == snapName {
return snap, nil
}
}
return snpInfo, ErrSnapNotFound{snapName, fmt.Errorf("snap (%s) for image (%s) not found",
snapName, poolName+"/"+imageName)}
}
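To illustrate how these strongly typed JSON specs line up with the CLI output, here is a minimal, standalone sketch (not part of the patch) that unmarshals a hypothetical `rbd --format=json snap ls` payload into a local copy of the snapInfo struct. The snapshot name, id, and size below are made up, and the timestamp string follows the time.ANSIC layout that getSnapshotMetadata expects.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// local copy of the snapInfo spec above, for illustration only
type snapInfo struct {
	ID        int64  `json:"id"`
	Name      string `json:"name"`
	Size      int64  `json:"size"`
	Timestamp string `json:"timestamp"`
}

func main() {
	// hypothetical `rbd --format=json snap ls pool/image` payload
	stdout := []byte(`[{"id":4,"name":"csi-snap-example","size":1073741824,"timestamp":"Mon Apr 22 17:35:39 2019"}]`)

	var snaps []snapInfo
	if err := json.Unmarshal(stdout, &snaps); err != nil {
		panic(err)
	}

	for _, snap := range snaps {
		// getSnapshotMetadata parses this field with the time.ANSIC layout
		created, err := time.Parse(time.ANSIC, snap.Timestamp)
		if err != nil {
			panic(err)
		}
		fmt.Printf("snap %q: size=%d created=%s\n", snap.Name, snap.Size, created.Format(time.RFC3339))
	}
}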

554
pkg/rbd/voljournal.go Normal file
View File

@ -0,0 +1,554 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"strings"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"k8s.io/klog"
)
func validateNonEmptyField(field, fieldName, structName string) error {
if field == "" {
return fmt.Errorf("value '%s' in '%s' structure cannot be empty", fieldName, structName)
}
return nil
}
func validateRbdSnap(rbdSnap *rbdSnapshot) error {
if err := validateNonEmptyField(rbdSnap.RequestName, "RequestName", "rbdSnapshot"); err != nil {
return err
}
if err := validateNonEmptyField(rbdSnap.Monitors, "Monitors", "rbdSnapshot"); err != nil {
return err
}
if err := validateNonEmptyField(rbdSnap.AdminID, "AdminID", "rbdSnapshot"); err != nil {
return err
}
if err := validateNonEmptyField(rbdSnap.Pool, "Pool", "rbdSnapshot"); err != nil {
return err
}
if err := validateNonEmptyField(rbdSnap.RbdImageName, "RbdImageName", "rbdSnapshot"); err != nil {
return err
}
if err := validateNonEmptyField(rbdSnap.ClusterID, "ClusterID", "rbdSnapshot"); err != nil {
return err
}
return nil
}
func validateRbdVol(rbdVol *rbdVolume) error {
if err := validateNonEmptyField(rbdVol.RequestName, "RequestName", "rbdVolume"); err != nil {
return err
}
if err := validateNonEmptyField(rbdVol.Monitors, "Monitors", "rbdVolume"); err != nil {
return err
}
if err := validateNonEmptyField(rbdVol.AdminID, "AdminID", "rbdVolume"); err != nil {
return err
}
if err := validateNonEmptyField(rbdVol.Pool, "Pool", "rbdVolume"); err != nil {
return err
}
if err := validateNonEmptyField(rbdVol.ClusterID, "ClusterID", "rbdVolume"); err != nil {
return err
}
if rbdVol.VolSize == 0 {
return errors.New("value 'VolSize' in 'rbdVolume' structure cannot be 0")
}
return nil
}
/*
checkSnapExists, and its counterpart checkVolExists, function as checks to determine if the passed
in rbdSnapshot or rbdVolume exists on the backend.

**NOTE:** These functions manipulate the rados omaps that hold information regarding
volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
respective CSI driver generated snapshot or volume name based locks are held, as otherwise racy
access to these omaps may end up leaving them in an inconsistent state.

These functions need enough information about cluster and pool (i.e. Monitors, Pool, IDs filled in)
to operate. They further require that the RequestName element of the structure have a valid value
to operate on and determine if the said RequestName already exists on the backend.

These functions populate the snapshot or the image name, its attributes, and the CSI snapshot/volume
ID for the same when successful.

These functions also clean up stale omap reservations, i.e. when omap entries exist but the
backing images or snapshots are missing, or when one of the omaps exists and the next is missing.
This is because the order of omap creation and deletion is the inverse of each other and is
protected by the request name lock; hence, any stale omaps are leftovers from incomplete
transactions and are safe to garbage collect.
*/
func checkSnapExists(rbdSnap *rbdSnapshot, credentials map[string]string) (found bool, err error) {
if err = validateRbdSnap(rbdSnap); err != nil {
return false, err
}
key, err := getKey(rbdSnap.AdminID, credentials)
if err != nil {
return false, err
}
// check if request name is already part of the snaps omap
snapUUID, err := util.GetOMapValue(rbdSnap.Monitors, rbdSnap.AdminID,
key, rbdSnap.Pool, csiSnapsDirectory, csiSnapNameKeyPrefix+rbdSnap.RequestName)
if err != nil {
// error should specifically be not found, for image to be absent, any other error
// is not conclusive, and we should not proceed
if _, ok := err.(util.ErrKeyNotFound); ok {
return false, nil
}
return false, err
}
rbdSnap.RbdSnapName = rbdSnapNamePrefix + snapUUID
// TODO: use listomapvals to dump all keys instead of reading them one-by-one
// check if the snapshot image omap is present
savedSnapName, err := util.GetOMapValue(rbdSnap.Monitors, rbdSnap.AdminID,
key, rbdSnap.Pool, rbdSnapOMapPrefix+snapUUID, rbdSnapCSISnapNameKey)
if err != nil {
if _, ok := err.(util.ErrKeyNotFound); ok {
err = unreserveSnap(rbdSnap, credentials)
}
return false, err
}
// check if snapshot image omap points back to the request name
if savedSnapName != rbdSnap.RequestName {
// NOTE: This should never be possible, hence no cleanup, but log error
// and return, as cleanup may need to occur manually!
return false, fmt.Errorf("internal state inconsistent, omap snap"+
" names disagree, request name (%s) snap name (%s) image omap"+
" snap name (%s)", rbdSnap.RequestName, rbdSnap.RbdSnapName, savedSnapName)
}
// check if the snapshot source image omap is present
savedVolName, err := util.GetOMapValue(rbdSnap.Monitors, rbdSnap.AdminID,
key, rbdSnap.Pool, rbdSnapOMapPrefix+snapUUID, rbdSnapSourceImageKey)
if err != nil {
if _, ok := err.(util.ErrKeyNotFound); ok {
err = unreserveSnap(rbdSnap, credentials)
}
return false, err
}
// check if snapshot source image omap points back to the source volume passed in
if savedVolName != rbdSnap.RbdImageName {
// NOTE: This can happen if there is a snapname conflict, and we already have a snapshot
// with the same name pointing to a different RBD image as the source
err = fmt.Errorf("snapname points to different image, request name (%s)"+
" image name (%s) image omap"+" volume name (%s)",
rbdSnap.RequestName, rbdSnap.RbdImageName, savedVolName)
return false, ErrSnapNameConflict{rbdSnap.RequestName, err}
}
// Fetch on-disk image attributes
err = updateSnapWithImageInfo(rbdSnap, credentials)
if err != nil {
if _, ok := err.(ErrSnapNotFound); ok {
err = unreserveSnap(rbdSnap, credentials)
return false, err
}
return false, err
}
// found a snapshot already available, process and return its information
poolID, err := util.GetPoolID(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool)
if err != nil {
return false, err
}
vi := util.CSIIdentifier{
PoolID: poolID,
EncodingVersion: volIDVersion,
ClusterID: rbdSnap.ClusterID,
ObjectUUID: snapUUID,
}
rbdSnap.SnapID, err = vi.ComposeCSIID()
if err != nil {
return false, err
}
klog.V(4).Infof("Found existing snap (%s) with snap name (%s) for request (%s)",
rbdSnap.SnapID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
return true, nil
}
/*
Check comment on checkSnapExists, to understand how this function behaves
**NOTE:** These functions manipulate the rados omaps that hold information regarding
volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
respective CSI snapshot or volume name based locks are held, as otherwise racy access to these
omaps may end up leaving the omaps in an inconsistent state.
*/
func checkVolExists(rbdVol *rbdVolume, credentials map[string]string) (found bool, err error) {
var vi util.CSIIdentifier
if err = validateRbdVol(rbdVol); err != nil {
return false, err
}
key, err := getKey(rbdVol.AdminID, credentials)
if err != nil {
return false, err
}
// check if request name is already part of the volumes omap
imageUUID, err := util.GetOMapValue(rbdVol.Monitors, rbdVol.AdminID,
key, rbdVol.Pool, csiVolsDirectory, csiVolNameKeyPrefix+rbdVol.RequestName)
if err != nil {
// error should specifically be not found, for image to be absent, any other error
// is not conclusive, and we should not proceed
if _, ok := err.(util.ErrKeyNotFound); ok {
return false, nil
}
return false, err
}
rbdVol.RbdImageName = rbdImgNamePrefix + imageUUID
// check if the image omap is present
savedVolName, err := util.GetOMapValue(rbdVol.Monitors, rbdVol.AdminID,
key, rbdVol.Pool, rbdImageOMapPrefix+imageUUID, rbdImageCSIVolNameKey)
if err != nil {
if _, ok := err.(util.ErrKeyNotFound); ok {
err = unreserveVol(rbdVol, credentials)
}
return false, err
}
// check if image omap points back to the request name
if savedVolName != rbdVol.RequestName {
// NOTE: This should never be possible, hence no cleanup, but log error
// and return, as cleanup may need to occur manually!
return false, fmt.Errorf("internal state inconsistent, omap volume"+
" names disagree, request name (%s) image name (%s) image omap"+
" volume name (%s)", rbdVol.RequestName, rbdVol.RbdImageName, savedVolName)
}
// NOTE: Returned volsize should be the on-disk volsize, not the requested vol size, so
// save it for size checks before fetching image data
requestSize := rbdVol.VolSize
// Fetch on-disk image attributes and compare against request
err = updateVolWithImageInfo(rbdVol, credentials)
if err != nil {
if _, ok := err.(ErrImageNotFound); ok {
err = unreserveVol(rbdVol, credentials)
return false, err
}
return false, err
}
// size checks
if rbdVol.VolSize < requestSize {
err = fmt.Errorf("image with the same name (%s) but with different size already exists",
rbdVol.RbdImageName)
return false, ErrVolNameConflict{rbdVol.RbdImageName, err}
}
// TODO: We should also ensure image features and format are the same
// found a volume already available, process and return it!
poolID, err := util.GetPoolID(rbdVol.Monitors, rbdVol.AdminID, key, rbdVol.Pool)
if err != nil {
return false, err
}
vi = util.CSIIdentifier{
PoolID: poolID,
EncodingVersion: volIDVersion,
ClusterID: rbdVol.ClusterID,
ObjectUUID: imageUUID,
}
rbdVol.VolID, err = vi.ComposeCSIID()
if err != nil {
return false, err
}
klog.V(4).Infof("Found existng volume (%s) with image name (%s) for request (%s)",
rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
return true, nil
}
/*
unreserveSnap and unreserveVol remove omaps associated with the snapshot and the image name,
and also remove the corresponding request name key in the snaps or volumes omaps respectively.
This is performed within the request name lock, to ensure that requests with the same name do not
manipulate the omap entries concurrently.
*/
func unreserveSnap(rbdSnap *rbdSnapshot, credentials map[string]string) error {
key, err := getKey(rbdSnap.AdminID, credentials)
if err != nil {
return err
}
// delete snap image omap (first, inverse of create order)
snapUUID := strings.TrimPrefix(rbdSnap.RbdSnapName, rbdSnapNamePrefix)
err = util.RemoveObject(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool, rbdSnapOMapPrefix+snapUUID)
if err != nil {
if _, ok := err.(util.ErrObjectNotFound); !ok {
klog.Errorf("failed removing oMap %s (%s)", rbdSnapOMapPrefix+snapUUID, err)
return err
}
}
// delete the request name omap key (last, inverse of create order)
err = util.RemoveOMapKey(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool,
csiSnapsDirectory, csiSnapNameKeyPrefix+rbdSnap.RequestName)
if err != nil {
klog.Errorf("failed removing oMap key %s (%s)", csiSnapNameKeyPrefix+rbdSnap.RequestName, err)
return err
}
return nil
}
func unreserveVol(rbdVol *rbdVolume, credentials map[string]string) error {
key, err := getKey(rbdVol.AdminID, credentials)
if err != nil {
return err
}
// delete image omap (first, inverse of create order)
imageUUID := strings.TrimPrefix(rbdVol.RbdImageName, rbdImgNamePrefix)
err = util.RemoveObject(rbdVol.Monitors, rbdVol.AdminID, key, rbdVol.Pool, rbdImageOMapPrefix+imageUUID)
if err != nil {
if _, ok := err.(util.ErrObjectNotFound); !ok {
klog.Errorf("failed removing oMap %s (%s)", rbdImageOMapPrefix+imageUUID, err)
return err
}
}
// delete the request name omap key (last, inverse of create order)
err = util.RemoveOMapKey(rbdVol.Monitors, rbdVol.AdminID, key, rbdVol.Pool,
csiVolsDirectory, csiVolNameKeyPrefix+rbdVol.RequestName)
if err != nil {
klog.Errorf("failed removing oMap key %s (%s)", csiVolNameKeyPrefix+rbdVol.RequestName, err)
return err
}
return nil
}
// reserveOMapName creates an omap with passed in oMapNamePrefix and a generated <uuid>.
// It ensures the generated omap name does not already exist; if conflicts are detected, a set
// number of retries with newer uuids are attempted before returning an error
func reserveOMapName(monitors, adminID, key, poolName, oMapNamePrefix string) (string, error) {
var iterUUID string
maxAttempts := 5
attempt := 1
for attempt <= maxAttempts {
// generate a uuid for the image name
iterUUID = uuid.NewUUID().String()
err := util.CreateObject(monitors, adminID, key, poolName, oMapNamePrefix+iterUUID)
if err != nil {
if _, ok := err.(util.ErrObjectExists); ok {
attempt++
// try again with a different uuid, for maxAttempts tries
klog.V(4).Infof("uuid (%s) conflict detected, retrying (attempt %d of %d)",
iterUUID, attempt, maxAttempts)
continue
}
return "", err
}
break
}
if attempt > maxAttempts {
return "", errors.New("uuid conflicts exceeds retry threshold")
}
return iterUUID, nil
}
/*
reserveSnap and reserveVol add entries to the snapshots and volumes omaps respectively, after
generating a target snapshot or image name for use. Further, these functions create the snapshot or
image name omaps, to store back pointers to the CSI generated request names.
This is performed within the request name lock, to ensure that requests with the same name do not
manipulate the omap entries concurrently.
*/
func reserveSnap(rbdSnap *rbdSnapshot, credentials map[string]string) error {
var vi util.CSIIdentifier
key, err := getKey(rbdSnap.AdminID, credentials)
if err != nil {
return err
}
poolID, err := util.GetPoolID(rbdSnap.Monitors, rbdSnap.AdminID, key,
rbdSnap.Pool)
if err != nil {
return err
}
// Create the snapUUID based omap first, to reserve the same and avoid conflicts
// NOTE: If any service loss occurs post creation of the snap omap, and before
// setting the omap key (rbdSnapCSISnapNameKey) to point back to the snaps omap, the
// snap omap key will leak
snapUUID, err := reserveOMapName(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool,
rbdSnapOMapPrefix)
if err != nil {
return err
}
// Create request snapUUID key in csi snaps omap and store the uuid based
// snap name into it
err = util.SetOMapKeyValue(rbdSnap.Monitors, rbdSnap.AdminID, key,
rbdSnap.Pool, csiSnapsDirectory, csiSnapNameKeyPrefix+rbdSnap.RequestName, snapUUID)
if err != nil {
return err
}
defer func() {
if err != nil {
klog.Warningf("reservation failed for volume: %s", rbdSnap.RequestName)
errDefer := unreserveSnap(rbdSnap, credentials)
if errDefer != nil {
klog.Warningf("failed undoing reservation of snapshot: %s (%v)",
rbdSnap.RequestName, errDefer)
}
}
}()
// Create snap name based omap and store CSI request name key and source information
err = util.SetOMapKeyValue(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool,
rbdSnapOMapPrefix+snapUUID, rbdSnapCSISnapNameKey, rbdSnap.RequestName)
if err != nil {
return err
}
err = util.SetOMapKeyValue(rbdSnap.Monitors, rbdSnap.AdminID, key, rbdSnap.Pool,
rbdSnapOMapPrefix+snapUUID, rbdSnapSourceImageKey, rbdSnap.RbdImageName)
if err != nil {
return err
}
// generate the volume ID to return to the CO system
vi = util.CSIIdentifier{
PoolID: poolID,
EncodingVersion: volIDVersion,
ClusterID: rbdSnap.ClusterID,
ObjectUUID: snapUUID,
}
rbdSnap.SnapID, err = vi.ComposeCSIID()
if err != nil {
return err
}
rbdSnap.RbdSnapName = rbdSnapNamePrefix + snapUUID
klog.V(4).Infof("Generated Volume ID (%s) and image name (%s) for request name (%s)",
rbdSnap.SnapID, rbdSnap.RbdImageName, rbdSnap.RequestName)
return nil
}
func reserveVol(rbdVol *rbdVolume, credentials map[string]string) error {
var vi util.CSIIdentifier
key, err := getKey(rbdVol.AdminID, credentials)
if err != nil {
return err
}
poolID, err := util.GetPoolID(rbdVol.Monitors, rbdVol.AdminID, key,
rbdVol.Pool)
if err != nil {
return err
}
// Create the imageUUID based omap first, to reserve the same and avoid conflicts
// NOTE: If any service loss occurs post creation of the image omap, and before
// setting the omap key (rbdImageCSIVolNameKey) to point back to the volumes omap,
// the image omap key will leak
imageUUID, err := reserveOMapName(rbdVol.Monitors, rbdVol.AdminID, key, rbdVol.Pool, rbdImageOMapPrefix)
if err != nil {
return err
}
// Create request volName key in csi volumes omap and store the uuid based
// image name into it
err = util.SetOMapKeyValue(rbdVol.Monitors, rbdVol.AdminID, key,
rbdVol.Pool, csiVolsDirectory, csiVolNameKeyPrefix+rbdVol.RequestName, imageUUID)
if err != nil {
return err
}
defer func() {
if err != nil {
klog.Warningf("reservation failed for volume: %s", rbdVol.RequestName)
errDefer := unreserveVol(rbdVol, credentials)
if errDefer != nil {
klog.Warningf("failed undoing reservation of volume: %s (%v)",
rbdVol.RequestName, errDefer)
}
}
}()
// Create image name based omap and store CSI request volume name key and data
err = util.SetOMapKeyValue(rbdVol.Monitors, rbdVol.AdminID, key, rbdVol.Pool,
rbdImageOMapPrefix+imageUUID, rbdImageCSIVolNameKey, rbdVol.RequestName)
if err != nil {
return err
}
// generate the volume ID to return to the CO system
vi = util.CSIIdentifier{
PoolID: poolID,
EncodingVersion: volIDVersion,
ClusterID: rbdVol.ClusterID,
ObjectUUID: imageUUID,
}
rbdVol.VolID, err = vi.ComposeCSIID()
if err != nil {
return err
}
rbdVol.RbdImageName = rbdImgNamePrefix + imageUUID
klog.V(4).Infof("Generated Volume ID (%s) and image name (%s) for request name (%s)",
rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
return nil
}
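The creation and deletion ordering that the journal relies on can be modelled with a small, self-contained sketch (not part of the patch). The maps below are only stand-ins for the two rados omap levels, and the "request-name" key is illustrative rather than the actual rbdImageCSIVolNameKey or csiVolsDirectory values defined elsewhere in the package.

package main

import "fmt"

// Toy in-memory stand-ins for the two omap levels used by the journal: the
// per-pool volumes directory (request name -> uuid) and the per-image omap
// that stores the back pointer to the request name.
type journalModel struct {
	volsDirectory map[string]string            // request name -> image uuid
	imageOMaps    map[string]map[string]string // image uuid -> omap key/values
}

// reserve mimics reserveVol's ordering: create the uuid-named omap first,
// then the directory key, then the back pointer inside the uuid omap.
func (j *journalModel) reserve(request, uuid string) {
	j.imageOMaps[uuid] = map[string]string{}     // 1. CreateObject
	j.volsDirectory[request] = uuid              // 2. SetOMapKeyValue (directory key)
	j.imageOMaps[uuid]["request-name"] = request // 3. SetOMapKeyValue (back pointer)
}

// unreserve mimics unreserveVol's inverse ordering: remove the uuid-named
// omap first, then the directory key; a crash in between leaves only a
// directory key, which checkVolExists can detect and garbage collect.
func (j *journalModel) unreserve(request, uuid string) {
	delete(j.imageOMaps, uuid)       // 1. RemoveObject
	delete(j.volsDirectory, request) // 2. RemoveOMapKey
}

func main() {
	j := &journalModel{
		volsDirectory: map[string]string{},
		imageOMaps:    map[string]map[string]string{},
	}
	j.reserve("pvc-0001", "0a1b2c3d-0000-4e6a-9d1a-0123456789ab")
	fmt.Println("after reserve:", j.volsDirectory, j.imageOMaps)
	j.unreserve("pvc-0001", "0a1b2c3d-0000-4e6a-9d1a-0123456789ab")
	fmt.Println("after unreserve:", j.volsDirectory, j.imageOMaps)
}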

256
pkg/util/cephcmds.go Normal file
View File

@ -0,0 +1,256 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"k8s.io/klog"
"os"
"os/exec"
"strings"
)
// ExecCommand executes the passed in program with args and returns separate stdout and stderr streams
func ExecCommand(program string, args ...string) (stdout, stderr []byte, err error) {
var (
cmd = exec.Command(program, args...) // nolint: gosec
stdoutBuf bytes.Buffer
stderrBuf bytes.Buffer
)
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
if err := cmd.Run(); err != nil {
return stdoutBuf.Bytes(), stderrBuf.Bytes(), fmt.Errorf("an error (%v)"+
" occurred while running %s", err, program)
}
return stdoutBuf.Bytes(), nil, nil
}
// cephStoragePoolSummary strongly typed JSON spec for osd ls pools output
type cephStoragePoolSummary struct {
Name string `json:"poolname"`
Number int64 `json:"poolnum"`
}
// GetPoolID searches a list of pools in a cluster and returns the ID of the pool that matches
// the passed in poolName parameter
func GetPoolID(monitors string, adminID string, key string, poolName string) (int64, error) {
// ceph <options> -f json osd lspools
// JSON out: [{"poolnum":<int64>,"poolname":<string>}]
stdout, _, err := ExecCommand(
"ceph",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-f", "json",
"osd", "lspools")
if err != nil {
klog.Errorf("failed getting pool list from cluster (%s)", err)
return 0, err
}
var pools []cephStoragePoolSummary
err = json.Unmarshal(stdout, &pools)
if err != nil {
klog.Errorf("failed to parse JSON output of pool list from cluster (%s)", err)
return 0, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s", err, string(stdout))
}
for _, p := range pools {
if poolName == p.Name {
return p.Number, nil
}
}
return 0, fmt.Errorf("pool (%s) not found in Ceph cluster", poolName)
}
// GetPoolName lists all pools in a ceph cluster, and matches the pool whose pool ID is equal to
// the requested poolID parameter
func GetPoolName(monitors string, adminID string, key string, poolID int64) (string, error) {
// ceph <options> -f json osd lspools
// [{"poolnum":1,"poolname":"replicapool"}]
stdout, _, err := ExecCommand(
"ceph",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-f", "json",
"osd", "lspools")
if err != nil {
klog.Errorf("failed getting pool list from cluster (%s)", err)
return "", err
}
var pools []cephStoragePoolSummary
err = json.Unmarshal(stdout, &pools)
if err != nil {
klog.Errorf("failed to parse JSON output of pool list from cluster (%s)", err)
return "", fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s", err, string(stdout))
}
for _, p := range pools {
if poolID == p.Number {
return p.Name, nil
}
}
return "", fmt.Errorf("pool ID (%d) not found in Ceph cluster", poolID)
}
// SetOMapKeyValue sets the given key and value into the provided Ceph omap name
func SetOMapKeyValue(monitors, adminID, key, poolName, oMapName, oMapKey, keyValue string) error {
// Command: "rados <options> setomapval oMapName oMapKey keyValue"
_, _, err := ExecCommand(
"rados",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-p", poolName,
"setomapval", oMapName, oMapKey, keyValue)
if err != nil {
klog.Errorf("failed adding key (%s with value %s), to omap (%s) in "+
"pool (%s): (%v)", oMapKey, keyValue, oMapName, poolName, err)
return err
}
return nil
}
// GetOMapValue gets the value for the given key from the named omap
func GetOMapValue(monitors, adminID, key, poolName, oMapName, oMapKey string) (string, error) {
// Command: "rados <options> getomapval oMapName oMapKey <outfile>"
// No such key: replicapool/csi.volumes.directory.default/csi.volname
tmpFile, err := ioutil.TempFile("", "omap-get-")
if err != nil {
klog.Errorf("failed creating a temporary file for key contents")
return "", err
}
defer tmpFile.Close()
defer os.Remove(tmpFile.Name())
stdout, stderr, err := ExecCommand(
"rados",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-p", poolName,
"getomapval", oMapName, oMapKey, tmpFile.Name())
if err != nil {
// no logs, as attempting to check for key/value is done even on regular call sequences
stdoutanderr := strings.Join([]string{string(stdout), string(stderr)}, " ")
if strings.Contains(stdoutanderr, "No such key: "+poolName+"/"+oMapName+"/"+oMapKey) {
return "", ErrKeyNotFound{poolName + "/" + oMapName + "/" + oMapKey, err}
}
if strings.Contains(stdoutanderr, "error getting omap value "+
poolName+"/"+oMapName+"/"+oMapKey+": (2) No such file or directory") {
return "", ErrKeyNotFound{poolName + "/" + oMapName + "/" + oMapKey, err}
}
return "", fmt.Errorf("error (%v) occured, command output streams is (%s)",
err.Error(), stdoutanderr)
}
keyValue, err := ioutil.ReadAll(tmpFile)
return string(keyValue), err
}
// RemoveOMapKey removes the omap key from the given omap name
func RemoveOMapKey(monitors, adminID, key, poolName, oMapName, oMapKey string) error {
// Command: "rados <options> rmomapkey oMapName oMapKey"
_, _, err := ExecCommand(
"rados",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-p", poolName,
"rmomapkey", oMapName, oMapKey)
if err != nil {
// NOTE: Missing omap key removal does not return an error
klog.Errorf("failed removing key (%s), from omap (%s) in "+
"pool (%s): (%v)", oMapKey, oMapName, poolName, err)
return err
}
return nil
}
// CreateObject creates the object name passed in and returns ErrObjectExists if the provided object
// is already present in rados
func CreateObject(monitors, adminID, key, poolName, objectName string) error {
// Command: "rados <options> create objectName"
stdout, _, err := ExecCommand(
"rados",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-p", poolName,
"create", objectName)
if err != nil {
klog.Errorf("failed creating omap (%s) in pool (%s): (%v)", objectName, poolName, err)
if strings.Contains(string(stdout), "error creating "+poolName+"/"+objectName+
": (17) File exists") {
return ErrObjectExists{objectName, err}
}
return err
}
return nil
}
// RemoveObject removes the entire omap name passed in and returns ErrObjectNotFound if the provided
// omap is not found in rados
func RemoveObject(monitors, adminID, key, poolName, oMapName string) error {
// Command: "rados <options> rm oMapName"
stdout, _, err := ExecCommand(
"rados",
"-m", monitors,
"--id", adminID,
"--key="+key,
"-c", CephConfigPath,
"-p", poolName,
"rm", oMapName)
if err != nil {
klog.Errorf("failed removing omap (%s) in pool (%s): (%v)", oMapName, poolName, err)
if strings.Contains(string(stdout), "error removing "+poolName+">"+oMapName+
": (2) No such file or directory") {
return ErrObjectNotFound{oMapName, err}
}
return err
}
return nil
}
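As a companion to GetPoolID and GetPoolName, here is a standalone sketch (not part of the patch) of just the JSON handling, fed with a sample `ceph -f json osd lspools` payload in the shape documented above; the pool names and numbers are invented.

package main

import (
	"encoding/json"
	"fmt"
)

// local copy of the cephStoragePoolSummary spec above, for illustration only
type poolSummary struct {
	Name   string `json:"poolname"`
	Number int64  `json:"poolnum"`
}

func main() {
	// sample payload in the documented shape: [{"poolnum":<int64>,"poolname":<string>}]
	stdout := []byte(`[{"poolnum":1,"poolname":"replicapool"},{"poolnum":2,"poolname":"cephfs_data"}]`)

	var pools []poolSummary
	if err := json.Unmarshal(stdout, &pools); err != nil {
		panic(err)
	}

	// the same name -> ID match that GetPoolID performs
	for _, p := range pools {
		if p.Name == "replicapool" {
			fmt.Printf("pool %q has ID %d\n", p.Name, p.Number)
		}
	}
}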

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package util
import (
"io/ioutil"
@ -32,17 +32,19 @@ fuse_set_user_groups = false
const (
cephConfigRoot = "/etc/ceph"
cephConfigPath = "/etc/ceph/ceph.conf"
CephConfigPath = "/etc/ceph/ceph.conf"
)
func createCephConfigRoot() error {
return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}
func writeCephConfig() error {
// WriteCephConfig writes out a basic ceph.conf file, making it easy to use
// ceph related CLIs
func WriteCephConfig() error {
if err := createCephConfigRoot(); err != nil {
return err
}
return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
return ioutil.WriteFile(CephConfigPath, cephConfig, 0640)
}
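The WriteCephConfig pattern above (ensure the config root exists, then write ceph.conf) can be exercised safely with a temporary directory, as in the standalone sketch below (not part of the patch); the file body is only a placeholder built around the fuse_set_user_groups line visible in the hunk, not the full cephConfig contents.

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// stand-in for cephConfigRoot/CephConfigPath, rooted in a temp dir so
	// this runs without touching /etc/ceph
	root, err := ioutil.TempDir("", "ceph-conf-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	confDir := filepath.Join(root, "etc", "ceph")
	if err := os.MkdirAll(confDir, 0755); err != nil { // mirrors createCephConfigRoot
		panic(err)
	}

	// placeholder config body; the real cephConfig is defined elsewhere in the package
	conf := []byte("[global]\nfuse_set_user_groups = false\n")
	if err := ioutil.WriteFile(filepath.Join(confDir, "ceph.conf"), conf, 0640); err != nil {
		panic(err)
	}
}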

View File

@ -1,137 +0,0 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"k8s.io/klog"
"path"
"strings"
)
// StoreReader interface enables plugging different stores, that contain the
// keys and data. (e.g k8s secrets or local files)
type StoreReader interface {
DataForKey(clusterID string, key string) (string, error)
}
/* ConfigKeys contents and format,
- csMonitors: MON list, comma separated
- csAdminID: adminID, used for provisioning
- csUserID: userID, used for publishing
- csAdminKey: key, for adminID in csProvisionerUser
- csUserKey: key, for userID in csPublisherUser
- csPools: Pool list, comma separated
*/
// Constants for various ConfigKeys
const (
csMonitors = "monitors"
csAdminID = "adminid"
csUserID = "userid"
csAdminKey = "adminkey"
csUserKey = "userkey"
csPools = "pools"
)
// ConfigStore provides various getters for ConfigKeys
type ConfigStore struct {
StoreReader
}
// dataForKey returns data from the config store for the provided key
func (dc *ConfigStore) dataForKey(clusterID, key string) (string, error) {
if dc.StoreReader != nil {
return dc.StoreReader.DataForKey(clusterID, key)
}
return "", errors.New("config store location uninitialized")
}
// Mons returns a comma separated MON list from the cluster config represented by clusterID
func (dc *ConfigStore) Mons(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csMonitors)
}
// Pools returns a list of pool names from the cluster config represented by clusterID
func (dc *ConfigStore) Pools(clusterID string) ([]string, error) {
content, err := dc.dataForKey(clusterID, csPools)
if err != nil {
return nil, err
}
return strings.Split(content, ","), nil
}
// AdminID returns the admin ID from the cluster config represented by clusterID
func (dc *ConfigStore) AdminID(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csAdminID)
}
// UserID returns the user ID from the cluster config represented by clusterID
func (dc *ConfigStore) UserID(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csUserID)
}
// KeyForUser returns the key for the requested user ID from the cluster config
// represented by clusterID
func (dc *ConfigStore) KeyForUser(clusterID, userID string) (data string, err error) {
var fetchKey string
user, err := dc.AdminID(clusterID)
if err != nil {
return
}
if user == userID {
fetchKey = csAdminKey
} else {
user, err = dc.UserID(clusterID)
if err != nil {
return
}
if user != userID {
err = fmt.Errorf("requested user (%s) not found in cluster configuration of (%s)", userID, clusterID)
return
}
fetchKey = csUserKey
}
return dc.dataForKey(clusterID, fetchKey)
}
// NewConfigStore returns a config store based on value of configRoot. If
// configRoot is not "k8s_objects" then it is assumed to be a path to a
// directory, under which the configuration files can be found
func NewConfigStore(configRoot string) (*ConfigStore, error) {
if configRoot != "k8s_objects" {
klog.Infof("cache-store: using files in path (%s) as config store", configRoot)
fc := &FileConfig{}
fc.BasePath = path.Clean(configRoot)
dc := &ConfigStore{fc}
return dc, nil
}
klog.Infof("cache-store: using k8s objects as config store")
kc := &K8sConfig{}
kc.Client = NewK8sClient()
kc.Namespace = GetK8sNamespace()
dc := &ConfigStore{kc}
return dc, nil
}

View File

@ -1,160 +0,0 @@
/*
Copyright 2019 ceph-csi authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"strings"
"testing"
)
var basePath = "./test_artifacts"
var clusterID = "testclusterid"
var cs *ConfigStore
func cleanupTestData() {
os.RemoveAll(basePath)
}
// nolint: gocyclo
func TestConfigStore(t *testing.T) {
var err error
var data string
var content string
var testDir string
defer cleanupTestData()
cs, err = NewConfigStore(basePath)
if err != nil {
t.Errorf("Fatal, failed to get a new config store")
}
err = os.MkdirAll(basePath, 0700)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as clusterid directory is missing
_, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: expected error due to missing parent directory")
}
testDir = basePath + "/" + "ceph-cluster-" + clusterID
err = os.MkdirAll(testDir, 0700)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as mons file is missing
_, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: expected error due to missing mons file")
}
data = ""
err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as MONs is an empty string
content, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
data = "mon1,mon2,mon3"
err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching MONs should succeed
content, err = cs.Mons(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
data = "pool1,pool2"
err = ioutil.WriteFile(testDir+"/"+csPools, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching Pools should succeed
listContent, err := cs.Pools(clusterID)
if err != nil || strings.Join(listContent, ",") != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
data = "provuser"
err = ioutil.WriteFile(testDir+"/"+csAdminID, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching provuser should succeed
content, err = cs.AdminID(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
data = "pubuser"
err = ioutil.WriteFile(testDir+"/"+csUserID, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching pubuser should succeed
content, err = cs.UserID(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
data = "provkey"
err = ioutil.WriteFile(testDir+"/"+csAdminKey, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching provkey should succeed
content, err = cs.KeyForUser(clusterID, "provuser")
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
data = "pubkey"
err = ioutil.WriteFile(testDir+"/"+csUserKey, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Fetching pubkey should succeed
content, err = cs.KeyForUser(clusterID, "pubuser")
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}
// TEST: Fetching random user key should fail
_, err = cs.KeyForUser(clusterID, "random")
if err == nil {
t.Errorf("Failed: Expected to fail fetching random user key")
}
}

74
pkg/util/csiconfig.go Normal file
View File

@ -0,0 +1,74 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
)
/*
Mons returns a comma separated MON list from the csi config for the given clusterID
Expected JSON structure in the passed in config file is,
[
{
"clusterID": "<cluster-id>",
"monitors":
[
"<monitor-value>",
"<monitor-value>",
...
]
},
...
]
*/
// clusterInfo strongly typed JSON spec for the above JSON structure
type clusterInfo struct {
ClusterID string `json:"clusterID"`
Monitors []string `json:"monitors"`
}
func Mons(pathToConfig, clusterID string) (string, error) {
var config []clusterInfo
// #nosec
content, err := ioutil.ReadFile(pathToConfig)
if err != nil {
err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterID, err)
return "", err
}
err = json.Unmarshal(content, &config)
if err != nil {
return "", fmt.Errorf("unmarshal failed: %v. raw buffer response: %s",
err, string(content))
}
for _, cluster := range config {
if cluster.ClusterID == clusterID {
if len(cluster.Monitors) == 0 {
return "", fmt.Errorf("empty monitor list for cluster ID (%s) in config", clusterID)
}
return strings.Join(cluster.Monitors, ","), nil
}
}
return "", fmt.Errorf("missing configuration for cluster ID (%s)", clusterID)
}
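A minimal, standalone sketch (not part of the patch) of the lookup Mons performs, using a hypothetical config body that follows the JSON structure documented above; the cluster IDs and monitor addresses are made up.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// local copy of the clusterInfo spec above, for illustration only
type clusterInfo struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
}

func main() {
	// hypothetical csi config file body with two clusters
	raw := []byte(`[
	  {"clusterID": "ceph-prod", "monitors": ["10.0.0.1:6789", "10.0.0.2:6789"]},
	  {"clusterID": "ceph-test", "monitors": ["192.168.0.10:6789"]}
	]`)

	var config []clusterInfo
	if err := json.Unmarshal(raw, &config); err != nil {
		panic(err)
	}

	// same lookup that Mons performs for a given clusterID
	for _, cluster := range config {
		if cluster.ClusterID == "ceph-prod" {
			fmt.Println(strings.Join(cluster.Monitors, ","))
		}
	}
}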

132
pkg/util/csiconfig_test.go Normal file
View File

@ -0,0 +1,132 @@
/*
Copyright 2019 ceph-csi authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"testing"
)
var basePath = "./test_artifacts"
var csiClusters = "csi-clusters.json"
var pathToConfig = basePath + "/" + csiClusters
var clusterID1 = "test1"
var clusterID2 = "test2"
func cleanupTestData() {
os.RemoveAll(basePath)
}
// nolint: gocyclo
func TestCSIConfig(t *testing.T) {
var err error
var data string
var content string
defer cleanupTestData()
err = os.MkdirAll(basePath, 0700)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as clusterid file is missing
_, err = Mons(pathToConfig, clusterID1)
if err == nil {
t.Errorf("Failed: expected error due to missing config")
}
data = ""
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as file is empty
content, err = Mons(pathToConfig, clusterID1)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
data = "[{\"clusterIDBad\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as clusterID data is malformed
content, err = Mons(pathToConfig, clusterID2)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitorsBad\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as monitors key is incorrect/missing
content, err = Mons(pathToConfig, clusterID2)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",2,\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as monitor data is malformed
content, err = Mons(pathToConfig, clusterID2)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should fail as clusterID is not present in config
content, err = Mons(pathToConfig, clusterID1)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}
// TEST: Should pass as clusterID is present in config
content, err = Mons(pathToConfig, clusterID2)
if err != nil || content != "mon1,mon2,mon3" {
t.Errorf("Failed: want (%s), got (%s) (%v)", "mon1,mon2,mon3", content, err)
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}," +
"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon4\",\"mon5\",\"mon6\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should pass as clusterID is present in config
content, err = Mons(pathToConfig, clusterID1)
if err != nil || content != "mon4,mon5,mon6" {
t.Errorf("Failed: want (%s), got (%s) (%v)", "mon4,mon5,mon6", content, err)
}
}

47
pkg/util/errors.go Normal file
View File

@ -0,0 +1,47 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// ErrKeyNotFound is returned when requested key in omap is not found
type ErrKeyNotFound struct {
keyName string
err error
}
func (e ErrKeyNotFound) Error() string {
return e.err.Error()
}
// ErrObjectExists is returned when named omap is already present in rados
type ErrObjectExists struct {
objectName string
err error
}
func (e ErrObjectExists) Error() string {
return e.err.Error()
}
// ErrObjectNotFound is returned when named omap is not found in rados
type ErrObjectNotFound struct {
oMapName string
err error
}
func (e ErrObjectNotFound) Error() string {
return e.err.Error()
}

View File

@ -1,57 +0,0 @@
/*
Copyright 2019 ceph-csi authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io/ioutil"
"path"
)
/*
FileConfig is a ConfigStore interface implementation that reads configuration
information from files.
BasePath defines the directory under which FileConfig will attempt to open and
read contents of various Ceph cluster configurations.
Each Ceph cluster configuration is stored under a directory named,
BasePath/ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
separates each Ceph cluster configuration.
Under each Ceph cluster configuration directory, individual files named as per
the ConfigKeys constants in the ConfigStore interface, store the required
configuration information.
*/
type FileConfig struct {
BasePath string
}
// DataForKey reads the appropriate config file, named using key, and returns
// the contents of the file to the caller
func (fc *FileConfig) DataForKey(clusterid, key string) (data string, err error) {
pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+clusterid, key)
// #nosec
content, err := ioutil.ReadFile(pathToKey)
if err != nil || string(content) == "" {
err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
return
}
data = string(content)
return
}

View File

@ -1,58 +0,0 @@
/*
Copyright 2019 ceph-csi authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8s "k8s.io/client-go/kubernetes"
)
/*
K8sConfig is a ConfigStore interface implementation that reads configuration
information from k8s secrets.
Each Ceph cluster configuration secret is expected to be named,
ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
separates each Ceph cluster configuration.
The secret is expected to contain keys, as defined by the ConfigKeys constants
in the ConfigStore interface.
*/
type K8sConfig struct {
Client *k8s.Clientset
Namespace string
}
// DataForKey reads the appropriate k8s secret, named using clusterid, and
// returns the contents of key within the secret
func (kc *K8sConfig) DataForKey(clusterid, key string) (data string, err error) {
secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+clusterid, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
return
}
content, ok := secret.Data[key]
if !ok {
err = fmt.Errorf("missing data for key (%s) in cluster configuration of (%s)", key, clusterid)
return
}
data = string(content)
return
}

151
pkg/util/volid.go Normal file
View File

@ -0,0 +1,151 @@
/*
Copyright 2019 Ceph-CSI authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/binary"
"encoding/hex"
"errors"
"strings"
)
/*
CSIIdentifier contains the elements that form a CSI ID to be returned by the CSI plugin, and
contains enough information to decompose and extract required cluster and pool information to locate
the volume that relates to the CSI ID.
The CSI identifier is composed as elaborated in the comment against ComposeCSIID and thus,
DecomposeCSIID is the inverse of the same function.
The CSIIdentifier structure carries the following fields,
- PoolID: 64 bit integer of the pool that the volume belongs to, where the ID comes from Ceph pool
identifier for the corresponding pool name.
- EncodingVersion: Carries the version number of the encoding scheme used to encode the CSI ID,
and is preserved for any future proofing w.r.t changes in the encoding scheme, and to retain
ability to parse backward compatible encodings.
- ClusterID: Is a unique ID per cluster that the CSI instance is serving and is restricted to
lengths that can be accommodated in the encoding scheme.
- ObjectUUID: Is the on-disk uuid of the object (image/snapshot) name, for the CSI volume that
corresponds to this CSI ID.
*/
type CSIIdentifier struct {
PoolID int64 // TODO: Name appropriately when reused for CephFS
EncodingVersion uint16
ClusterID string
ObjectUUID string
}
// This maximum comes from the CSI spec on max bytes allowed in the various CSI ID fields
const maxVolIDLen = 128
/*
ComposeCSIID composes a CSI ID from passed in parameters.
Version 1 of the encoding scheme is as follows,
[csi_id_version=1:4byte] + [-:1byte]
[length of clusterID=1:4byte] + [-:1byte]
[clusterID:36bytes (MAX)] + [-:1byte]
[poolID:16bytes] + [-:1byte]
[ObjectUUID:36bytes]
Total of constant field lengths, including '-' field separators would hence be,
4+1+4+1+1+16+1+36 = 64
*/
const (
knownFieldSize = 64
uuidSize = 36
)
func (ci CSIIdentifier) ComposeCSIID() (string, error) {
buf16 := make([]byte, 2)
buf64 := make([]byte, 8)
if (knownFieldSize + len(ci.ClusterID)) > maxVolIDLen {
return "", errors.New("CSI ID encoding length overflow")
}
if len(ci.ObjectUUID) != uuidSize {
return "", errors.New("CSI ID invalid object uuid")
}
binary.BigEndian.PutUint16(buf16, ci.EncodingVersion)
versionEncodedHex := hex.EncodeToString(buf16)
binary.BigEndian.PutUint16(buf16, uint16(len(ci.ClusterID)))
clusterIDLength := hex.EncodeToString(buf16)
binary.BigEndian.PutUint64(buf64, uint64(ci.PoolID))
poolIDEncodedHex := hex.EncodeToString(buf64)
return strings.Join([]string{versionEncodedHex, clusterIDLength, ci.ClusterID,
poolIDEncodedHex, ci.ObjectUUID}, "-"), nil
}
/*
DecomposeCSIID composes a CSIIdentifier from passed in string
*/
func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) {
bytesToProcess := uint16(len(composedCSIID))
// if length is less that expected constant elements, then bail out!
if bytesToProcess < knownFieldSize {
return errors.New("failed to decode CSI identifier, string underflow")
}
buf16, err := hex.DecodeString(composedCSIID[0:4])
if err != nil {
return err
}
ci.EncodingVersion = binary.BigEndian.Uint16(buf16)
// 4 for version encoding and 1 for '-' separator
bytesToProcess -= 5
buf16, err = hex.DecodeString(composedCSIID[5:9])
if err != nil {
return err
}
clusterIDLength := binary.BigEndian.Uint16(buf16)
// 4 for length encoding and 1 for '-' separator
bytesToProcess -= 5
if bytesToProcess < (clusterIDLength + 1) {
return errors.New("failed to decode CSI identifier, string underflow")
}
ci.ClusterID = composedCSIID[10 : 10+clusterIDLength]
// additional 1 for '-' separator
bytesToProcess -= (clusterIDLength + 1)
nextFieldStartIdx := 10 + clusterIDLength + 1
if bytesToProcess < 17 {
return errors.New("failed to decode CSI identifier, string underflow")
}
buf64, err := hex.DecodeString(composedCSIID[nextFieldStartIdx : nextFieldStartIdx+16])
if err != nil {
return err
}
ci.PoolID = int64(binary.BigEndian.Uint64(buf64))
// 16 for poolID encoding and 1 for '-' separator
bytesToProcess -= 17
nextFieldStartIdx = nextFieldStartIdx + 17
// has to be an exact match
if bytesToProcess != uuidSize {
return errors.New("failed to decode CSI identifier, string size mismatch")
}
ci.ObjectUUID = composedCSIID[nextFieldStartIdx : nextFieldStartIdx+uuidSize]
return err
}
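To make the byte layout concrete, here is a standalone sketch (not part of the patch) that reproduces the version-1 encoding by hand for the tuple used in the test file that follows; the field widths and '-' separators follow the ComposeCSIID comment, and the printed string matches the composedVolID expected by that test.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// values taken from the test tuple in volid_test.go
	var (
		poolID          int64  = 0xffff
		encodingVersion uint16 = 0xffff
		clusterID              = "01616094-9d93-4178-bf45-c7eac19e8b15"
		objectUUID             = "00000000-1111-2222-bbbb-cacacacacaca"
	)

	buf16 := make([]byte, 2)
	buf64 := make([]byte, 8)

	binary.BigEndian.PutUint16(buf16, encodingVersion)
	version := hex.EncodeToString(buf16) // "ffff"

	binary.BigEndian.PutUint16(buf16, uint16(len(clusterID)))
	clusterIDLen := hex.EncodeToString(buf16) // len is 36, encoded as "0024"

	binary.BigEndian.PutUint64(buf64, uint64(poolID))
	pool := hex.EncodeToString(buf64) // "000000000000ffff"

	id := strings.Join([]string{version, clusterIDLen, clusterID, pool, objectUUID}, "-")
	fmt.Println(id)
	// ffff-0024-01616094-9d93-4178-bf45-c7eac19e8b15-000000000000ffff-00000000-1111-2222-bbbb-cacacacacaca
}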

95
pkg/util/volid_test.go Normal file
View File

@ -0,0 +1,95 @@
/*
Copyright 2019 Ceph-CSI authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
)
type testTuple struct {
vID CSIIdentifier
composedVolID string
wantEnc bool
wantEncError bool
wantDec bool
wantDecError bool
}
// TODO: Add more test tuples to test out other edge conditions
var testData = []testTuple{
{
vID: CSIIdentifier{
PoolID: 0xffff,
EncodingVersion: 0xffff,
ClusterID: "01616094-9d93-4178-bf45-c7eac19e8b15",
ObjectUUID: "00000000-1111-2222-bbbb-cacacacacaca",
},
composedVolID: "ffff-0024-01616094-9d93-4178-bf45-c7eac19e8b15-000000000000ffff-00000000-1111-2222-bbbb-cacacacacaca",
wantEnc: true,
wantEncError: false,
wantDec: true,
wantDecError: false,
},
}
func TestComposeDecomposeID(t *testing.T) {
var (
err error
viDecompose CSIIdentifier
composedVolID string
)
for _, test := range testData {
if test.wantEnc {
composedVolID, err = test.vID.ComposeCSIID()
if err != nil && !test.wantEncError {
t.Errorf("Composing failed: want (%#v), got (%#v %#v)",
test, composedVolID, err)
}
if err == nil && test.wantEncError {
t.Errorf("Composing failed: want (%#v), got (%#v %#v)",
test, composedVolID, err)
}
if !test.wantEncError && err == nil && composedVolID != test.composedVolID {
t.Errorf("Composing failed: want (%#v), got (%#v %#v)",
test, composedVolID, err)
}
}
if test.wantDec {
err = viDecompose.DecomposeCSIID(test.composedVolID)
if err != nil && !test.wantDecError {
t.Errorf("Decomposing failed: want (%#v), got (%#v %#v)",
test, viDecompose, err)
}
if err == nil && test.wantDecError {
t.Errorf("Decomposing failed: want (%#v), got (%#v %#v)",
test, viDecompose, err)
}
if !test.wantDecError && err == nil && viDecompose != test.vID {
t.Errorf("Decomposing failed: want (%#v), got (%#v %#v)",
test, viDecompose, err)
}
}
}
}