Merge pull request #63 from ceph/devel

Sync rhs/ceph-csi:devel with ceph/ceph-csi:devel
This commit is contained in:
OpenShift Merge Robot 2021-12-23 15:06:11 +01:00 committed by GitHub
commit f48bc1a433
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 1291 additions and 184 deletions

View File

@ -19,7 +19,7 @@ rules:
# Body shouldn't be empty
body-min-length: [2, always, 1]
# Wrap the lines to 80 characters.
body-max-line-length: [1, always, 80]
body-max-line-length: [2, always, 80]
# always sign off the commit
trailer-exists: [2, always, "Signed-off-by:"]

View File

@ -96,9 +96,9 @@ for its support details.
| CephFS | Dynamically provision, de-provision File mode RWO volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWX volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Creating and deleting snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.3) | >= v1.17.0 |
| | Provision volume from snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.3) | >= v1.17.0 |
| | Provision volume from another volume | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.3) | >= v1.16.0 |
| | Creating and deleting snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
| | Provision volume from snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
| | Provision volume from another volume | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |
| | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
| | Volume/PV Metrics of File Mode Volume | Beta | >= v1.2.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |

View File

@ -1,5 +1,4 @@
{{- if .Values.rbac.create -}}
{{- if .Values.topology.enabled }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -18,5 +17,4 @@ roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end -}}

View File

@ -71,6 +71,7 @@ spec:
- "--nodeserver=true"
- "--pidlimit=-1"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
{{- if .Values.topology.enabled }}
@ -92,6 +93,8 @@ spec:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.pluginSocketFile }}"
- name: CSI_ADDONS_ENDPOINT
value: "unix:///csi/csi-addons.sock"
securityContext:
privileged: true
capabilities:

View File

@ -137,6 +137,7 @@ spec:
- "--controllerserver=true"
- "--pidlimit=-1"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
- "--rbdhardmaxclonedepth={{ .Values.provisioner.hardMaxCloneDepth }}"
@ -162,6 +163,8 @@ spec:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: CSI_ADDONS_ENDPOINT
value: "unix:///csi/csi-addons.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi

View File

@ -59,7 +59,7 @@ var conf util.Config
func init() {
// common flags
flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|liveness|controller]")
flag.StringVar(&conf.Endpoint, "endpoint", "unix://tmp/csi.sock", "CSI endpoint")
flag.StringVar(&conf.Endpoint, "endpoint", "unix:///tmp/csi.sock", "CSI endpoint")
flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver")
flag.StringVar(&conf.DriverNamespace, "drivernamespace", defaultNS, "namespace in which driver is deployed")
flag.StringVar(&conf.NodeID, "nodeid", "", "node id")
@ -129,7 +129,7 @@ func init() {
flag.BoolVar(&conf.EnableProfiling, "enableprofiling", false, "enable go profiling")
// CSI-Addons configuration
flag.StringVar(&conf.CSIAddonsEndpoint, "csi-addons-endpoint", "unix://tmp/csi-addons.sock", "CSI-Addons endpoint")
flag.StringVar(&conf.CSIAddonsEndpoint, "csi-addons-endpoint", "unix:///tmp/csi-addons.sock", "CSI-Addons endpoint")
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {

View File

@ -118,6 +118,7 @@ spec:
- "--type=rbd"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--pidlimit=-1"
@ -141,6 +142,8 @@ spec:
# value: encryptionConfig
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir

View File

@ -58,6 +58,7 @@ spec:
- "--type=rbd"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--enableprofiling=false"
@ -83,6 +84,8 @@ spec:
# value: encryptionConfig
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir

View File

@ -28,23 +28,24 @@ make image-cephcsi
| Option | Default value | Description |
| ------------------------ | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
| `--drivername` | `rbd.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
| `--nodeid` | _empty_ | This node's ID |
| `--type` | _empty_ | Driver type: `[rbd/cephfs]`. If the driver type is set to `rbd` it will act as a `rbd plugin` or if it's set to `cephfs` will act as a `cephfs plugin` |
| `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
| `--pidlimit` | _0_ | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all. |
| `--metricsport` | `8080` | TCP port for liveness metrics requests |
| `--metricspath` | `"/metrics"` | Path of prometheus endpoint where metrics will be available |
| `--enablegrpcmetrics` | `false` | [Deprecated] Enable grpc metrics collection and start prometheus server |
| `--polltime` | `"60s"` | Time interval in between each poll |
| `--timeout` | `"3s"` | Probe timeout in seconds |
| `--histogramoption` | `0.5,2,6` | [Deprecated] Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
| `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
| `--rbdhardmaxclonedepth` | `8` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs |
| `--rbdsoftmaxclonedepth` | `4` | Soft limit for maximum number of nested volume clones that are taken before a flatten occurs |
| `--skipforceflatten` | `false` | skip image flattening on kernel < 5.2 which support mapping of rbd images which has the deep-flatten feature |
| `--maxsnapshotsonimage` | `450` | Maximum number of snapshots allowed on rbd image without flattening |
| `--endpoint` | `unix:///tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
| `--csi-addons-endpoint` | `unix:///tmp/csi-addons.sock` | CSI-Addons endpoint, must be a UNIX socket |
| `--drivername` | `rbd.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
| `--nodeid` | _empty_ | This node's ID |
| `--type` | _empty_ | Driver type: `[rbd/cephfs]`. If the driver type is set to `rbd` it will act as a `rbd plugin` or if it's set to `cephfs` will act as a `cephfs plugin` |
| `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
| `--pidlimit` | _0_ | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all. |
| `--metricsport` | `8080` | TCP port for liveness metrics requests |
| `--metricspath` | `"/metrics"` | Path of prometheus endpoint where metrics will be available |
| `--enablegrpcmetrics` | `false` | [Deprecated] Enable grpc metrics collection and start prometheus server |
| `--polltime` | `"60s"` | Time interval in between each poll |
| `--timeout` | `"3s"` | Probe timeout in seconds |
| `--histogramoption` | `0.5,2,6` | [Deprecated] Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
| `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
| `--rbdhardmaxclonedepth` | `8` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs |
| `--rbdsoftmaxclonedepth` | `4` | Soft limit for maximum number of nested volume clones that are taken before a flatten occurs |
| `--skipforceflatten` | `false` | skip image flattening on kernel < 5.2 which support mapping of rbd images which has the deep-flatten feature |
| `--maxsnapshotsonimage` | `450` | Maximum number of snapshots allowed on rbd image without flattening |
**Available volume parameters:**

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

100
e2e/clone.go Normal file
View File

@ -0,0 +1,100 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// validateBiggerCloneFromPVC creates a 1Gi PVC with an attached application,
// smart-clones it into a bigger 2Gi PVC, and verifies that the application
// using the clone sees the larger size (directory size for filesystem mode,
// device size for block mode). All created resources are deleted before
// returning. Any failure is returned as a wrapped error so the caller can
// decide how to report it.
func validateBiggerCloneFromPVC(f *framework.Framework,
	pvcPath,
	appPath,
	pvcClonePath,
	appClonePath string) error {
	const (
		size    = "1Gi"
		newSize = "2Gi"
	)
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return fmt.Errorf("failed to load PVC: %w", err)
	}
	label := make(map[string]string)
	pvc.Namespace = f.UniqueName
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
	app, err := loadApp(appPath)
	if err != nil {
		return fmt.Errorf("failed to load app: %w", err)
	}
	label[appKey] = appLabel
	app.Namespace = f.UniqueName
	app.Labels = label
	// selector used later to locate the clone's pod when checking sizes.
	opt := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
	}
	err = createPVCAndApp("", f, pvc, app, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create pvc and application: %w", err)
	}
	pvcClone, err := loadPVC(pvcClonePath)
	if err != nil {
		// Return instead of e2elog.Failf so this error-returning helper
		// stays consistent and the caller controls test failure.
		return fmt.Errorf("failed to load PVC: %w", err)
	}
	pvcClone.Namespace = f.UniqueName
	pvcClone.Spec.DataSource.Name = pvc.Name
	// request a bigger size than the parent PVC to exercise resize-on-clone.
	pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
	appClone, err := loadApp(appClonePath)
	if err != nil {
		return fmt.Errorf("failed to load application: %w", err)
	}
	appClone.Namespace = f.UniqueName
	appClone.Labels = label
	err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create pvc clone and application: %w", err)
	}
	// the parent PVC/app are no longer needed once the clone exists.
	err = deletePVCAndApp("", f, pvc, app)
	if err != nil {
		return fmt.Errorf("failed to delete pvc and application: %w", err)
	}
	// nil VolumeMode defaults to Filesystem per the Kubernetes API.
	if pvcClone.Spec.VolumeMode == nil || *pvcClone.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
		err = checkDirSize(appClone, f, &opt, newSize)
		if err != nil {
			return fmt.Errorf("failed to validate directory size: %w", err)
		}
	}
	if pvcClone.Spec.VolumeMode != nil && *pvcClone.Spec.VolumeMode == v1.PersistentVolumeBlock {
		err = checkDeviceSize(appClone, f, &opt, newSize)
		if err != nil {
			return fmt.Errorf("failed to validate device size: %w", err)
		}
	}
	err = deletePVCAndApp("", f, pvcClone, appClone)
	if err != nil {
		return fmt.Errorf("failed to delete pvc and application: %w", err)
	}
	return nil
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@ -60,6 +76,8 @@ var (
appClonePath = rbdExamplePath + "pod-restore.yaml"
appSmartClonePath = rbdExamplePath + "pod-clone.yaml"
appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml"
pvcBlockRestorePath = rbdExamplePath + "pvc-block-restore.yaml"
appBlockRestorePath = rbdExamplePath + "pod-block-restore.yaml"
appEphemeralPath = rbdExamplePath + "pod-ephemeral.yaml"
snapshotPath = rbdExamplePath + "snapshot.yaml"
deployFSAppPath = e2eTemplatesPath + "rbd-fs-deployment.yaml"
@ -3338,6 +3356,181 @@ var _ = Describe("RBD", func() {
}
})
By("restore snapshot to a bigger size PVC", func() {
By("restore snapshot to bigger size pvc", func() {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
defer func() {
err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
}()
err = createRBDSnapshotClass(f)
if err != nil {
e2elog.Failf("failed to create VolumeSnapshotClass: %v", err)
}
defer func() {
err = deleteRBDSnapshotClass()
if err != nil {
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
}
}()
// validate filesystem mode PVC
err = validateBiggerPVCFromSnapshot(f,
pvcPath,
appPath,
snapshotPath,
pvcClonePath,
appClonePath)
if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err)
}
// validate block mode PVC
err = validateBiggerPVCFromSnapshot(f,
rawPvcPath,
rawAppPath,
snapshotPath,
pvcBlockRestorePath,
appBlockRestorePath)
if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err)
}
})
By("restore snapshot to bigger size encrypted PVC with VaultKMS", func() {
scOpts := map[string]string{
"encrypted": "true",
"encryptionKMSID": "vault-test",
}
err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
defer func() {
err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
}()
err = createRBDSnapshotClass(f)
if err != nil {
e2elog.Failf("failed to create VolumeSnapshotClass: %v", err)
}
defer func() {
err = deleteRBDSnapshotClass()
if err != nil {
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
}
}()
// validate filesystem mode PVC
err = validateBiggerPVCFromSnapshot(f,
pvcPath,
appPath,
snapshotPath,
pvcClonePath,
appClonePath)
if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err)
}
// validate block mode PVC
err = validateBiggerPVCFromSnapshot(f,
rawPvcPath,
rawAppPath,
snapshotPath,
pvcBlockRestorePath,
appBlockRestorePath)
if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err)
}
})
By("validate image deletion", func() {
validateRBDImageCount(f, 0, defaultRBDPool)
err := waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout)
if err != nil {
e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err)
}
})
})
By("clone PVC to a bigger size PVC", func() {
By("clone PVC to bigger size encrypted PVC with VaultKMS", func() {
scOpts := map[string]string{
"encrypted": "true",
"encryptionKMSID": "vault-test",
}
err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
defer func() {
err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
}()
// validate filesystem mode PVC
err = validateBiggerCloneFromPVC(f,
pvcPath,
appPath,
pvcSmartClonePath,
appSmartClonePath)
if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err)
}
// validate block mode PVC
err = validateBiggerCloneFromPVC(f,
rawPvcPath,
rawAppPath,
pvcBlockSmartClonePath,
appBlockSmartClonePath)
if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err)
}
})
By("clone PVC to bigger size pvc", func() {
err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
// validate filesystem mode PVC
err = validateBiggerCloneFromPVC(f,
pvcPath,
appPath,
pvcSmartClonePath,
appSmartClonePath)
if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err)
}
// validate block mode PVC
err = validateBiggerCloneFromPVC(f,
rawPvcPath,
rawAppPath,
pvcBlockSmartClonePath,
appBlockSmartClonePath)
if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err)
}
})
By("validate image deletion", func() {
validateRBDImageCount(f, 0, defaultRBDPool)
err := waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout)
if err != nil {
e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err)
}
})
})
// Make sure this should be last testcase in this file, because
// it deletes pool
By("Create a PVC and delete PVC when backend pool deleted", func() {

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@ -9,7 +25,9 @@ import (
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1"
. "github.com/onsi/gomega" // nolint
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@ -218,3 +236,88 @@ func getVolumeSnapshotContent(namespace, snapshotName string) (*snapapi.VolumeSn
return volumeSnapshotContent, nil
}
// validateBiggerPVCFromSnapshot creates a 1Gi PVC with an attached
// application, snapshots it, restores the snapshot into a bigger 2Gi PVC,
// and verifies that the application using the restored PVC sees the larger
// size (directory size for filesystem mode, device size for block mode).
// All created resources, including the snapshot, are deleted before
// returning. Any failure is returned as a wrapped error so the caller can
// decide how to report it.
func validateBiggerPVCFromSnapshot(f *framework.Framework,
	pvcPath,
	appPath,
	snapPath,
	pvcClonePath,
	appClonePath string) error {
	const (
		size    = "1Gi"
		newSize = "2Gi"
	)
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return fmt.Errorf("failed to load PVC: %w", err)
	}
	label := make(map[string]string)
	pvc.Namespace = f.UniqueName
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
	app, err := loadApp(appPath)
	if err != nil {
		return fmt.Errorf("failed to load app: %w", err)
	}
	label[appKey] = appLabel
	app.Namespace = f.UniqueName
	app.Labels = label
	// selector used later to locate the restored PVC's pod when checking sizes.
	opt := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
	}
	err = createPVCAndApp("", f, pvc, app, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create pvc and application: %w", err)
	}
	snap := getSnapshot(snapPath)
	snap.Namespace = f.UniqueName
	snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
	err = createSnapshot(&snap, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create snapshot: %w", err)
	}
	// the parent PVC/app can go away once the snapshot is taken.
	err = deletePVCAndApp("", f, pvc, app)
	if err != nil {
		return fmt.Errorf("failed to delete pvc and application: %w", err)
	}
	pvcClone, err := loadPVC(pvcClonePath)
	if err != nil {
		// Return instead of e2elog.Failf so this error-returning helper
		// stays consistent and the caller controls test failure.
		return fmt.Errorf("failed to load PVC: %w", err)
	}
	pvcClone.Namespace = f.UniqueName
	pvcClone.Spec.DataSource.Name = snap.Name
	// request a bigger size than the snapshot source to exercise
	// resize-on-restore.
	pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
	appClone, err := loadApp(appClonePath)
	if err != nil {
		return fmt.Errorf("failed to load application: %w", err)
	}
	appClone.Namespace = f.UniqueName
	appClone.Labels = label
	err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create pvc clone and application: %w", err)
	}
	err = deleteSnapshot(&snap, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to delete snapshot: %w", err)
	}
	// nil VolumeMode defaults to Filesystem per the Kubernetes API.
	if pvcClone.Spec.VolumeMode == nil || *pvcClone.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
		err = checkDirSize(appClone, f, &opt, newSize)
		if err != nil {
			return fmt.Errorf("failed to validate directory size: %w", err)
		}
	}
	if pvcClone.Spec.VolumeMode != nil && *pvcClone.Spec.VolumeMode == v1.PersistentVolumeBlock {
		err = checkDeviceSize(appClone, f, &opt, newSize)
		if err != nil {
			return fmt.Errorf("failed to validate device size: %w", err)
		}
	}
	err = deletePVCAndApp("", f, pvcClone, appClone)
	if err != nil {
		return fmt.Errorf("failed to delete pvc and application: %w", err)
	}
	return nil
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (

View File

@ -0,0 +1,17 @@
---
# e2e test pod that consumes the restored block-mode PVC as a raw
# block device (no filesystem is mounted).
apiVersion: v1
kind: Pod
metadata:
  name: pod-block-volume-restore
spec:
  containers:
    - name: centos
      image: quay.io/centos/centos:latest
      # keep the container alive so the test can inspect the device
      command: ["/bin/sleep", "infinity"]
      volumeDevices:
        # raw block device path exposed inside the container
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: rbd-block-pvc-restore

View File

@ -0,0 +1,17 @@
---
# e2e test PVC that restores a VolumeSnapshot into a new raw
# block-mode RBD volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-block-pvc-restore
spec:
  storageClassName: csi-rbd-sc
  # restore from an existing snapshot instead of provisioning empty
  dataSource:
    name: rbd-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  # raw block device; no filesystem is created on the volume
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi

View File

@ -128,7 +128,7 @@ func (rv *rbdVolume) generateTempClone() *rbdVolume {
tempClone.conn = rv.conn.Copy()
// The temp clone image need to have deep flatten feature
f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten}
tempClone.imageFeatureSet = librbd.FeatureSetFromNames(f)
tempClone.ImageFeatureSet = librbd.FeatureSetFromNames(f)
tempClone.ClusterID = rv.ClusterID
tempClone.Monitors = rv.Monitors
tempClone.Pool = rv.Pool
@ -181,6 +181,14 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
return err
}
// expand the image if the requested size is greater than the current size
err = rv.expand()
if err != nil {
log.ErrorLog(ctx, "failed to resize volume %s: %v", rv, err)
return err
}
return nil
}

View File

@ -136,6 +136,8 @@ func (cs *ControllerServer) parseVolCreateRequest(
// always round up the request size in bytes to the nearest MiB/GiB
rbdVol.VolSize = util.RoundOffBytes(volSizeBytes)
// RequestedVolSize has the size of the volume requested by the user.
rbdVol.RequestedVolSize = rbdVol.VolSize
// start with pool the same as journal pool, in case there is a topology
// based split, pool for the image will be updated subsequently
@ -192,48 +194,8 @@ func getGRPCErrorForCreateVolume(err error) error {
return status.Error(codes.Internal, err.Error())
}
// validateRequestedVolumeSize validates the request volume size with the
// source snapshot or volume size, if there is a size mismatches it returns an error.
func validateRequestedVolumeSize(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
if rbdSnap != nil {
vol := generateVolFromSnap(rbdSnap)
err := vol.Connect(cr)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
defer vol.Destroy()
err = vol.getImageInfo()
if err != nil {
return status.Error(codes.Internal, err.Error())
}
if rbdVol.VolSize != vol.VolSize {
return status.Errorf(
codes.InvalidArgument,
"size mismatches, requested volume size %d and source snapshot size %d",
rbdVol.VolSize,
vol.VolSize)
}
}
if parentVol != nil {
if rbdVol.VolSize != parentVol.VolSize {
return status.Errorf(
codes.InvalidArgument,
"size mismatches, requested volume size %d and source volume size %d",
rbdVol.VolSize,
parentVol.VolSize)
}
}
return nil
}
func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
err := validateRequestedVolumeSize(rbdVol, parentVol, rbdSnap, cr)
if err != nil {
return err
}
func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot) error {
var err error
switch {
case rbdSnap != nil:
err = rbdSnap.isCompatibleEncryption(&rbdVol.rbdImage)
@ -309,7 +271,7 @@ func (cs *ControllerServer) CreateVolume(
return cs.repairExistingVolume(ctx, req, cr, rbdVol, parentVol, rbdSnap)
}
err = checkValidCreateVolumeRequest(rbdVol, parentVol, rbdSnap, cr)
err = checkValidCreateVolumeRequest(rbdVol, parentVol, rbdSnap)
if err != nil {
return nil, err
}
@ -427,6 +389,14 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
return nil, err
}
// expand the image if the requested size is greater than the current size
err = rbdVol.expand()
if err != nil {
log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)
return nil, err
}
// rbdVol is a clone from parentVol
case vcs.GetVolume() != nil:
// When cloning into a thick-provisioned volume was happening,
@ -443,25 +413,41 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
rbdVol,
err)
} else if !thick {
err = cleanUpSnapshot(ctx, parentVol, rbdSnap, rbdVol, cr)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to remove partially cloned volume %q: %s", rbdVol, err)
}
err = undoVolReservation(ctx, rbdVol, cr)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to remove volume %q from journal: %s", rbdVol, err)
}
return nil, status.Errorf(
codes.Internal,
"cloning thick-provisioned volume %q has been interrupted, please retry", rbdVol)
return nil, cleanupThickClone(ctx, parentVol, rbdVol, rbdSnap, cr)
}
}
// expand the image if the requested size is greater than the current size
err := rbdVol.expand()
if err != nil {
log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)
return nil, err
}
}
return buildCreateVolumeResponse(req, rbdVol), nil
}
// cleanupThickClone will delete the snapshot and volume and undo the
// reservation, then return an Internal error so the CO retries the
// interrupted thick-clone operation.
//
// NOTE(review): the visible call site invokes this as
// cleanupThickClone(ctx, parentVol, rbdVol, rbdSnap, cr), i.e. the parent
// volume is passed as the second argument while the parameters here are
// declared in the order (rbdVol, parentVol). Confirm the intended argument
// order; if the call site is correct, the two parameter names are swapped.
func cleanupThickClone(ctx context.Context,
	rbdVol,
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials) error {
	// remove the partially cloned image together with its snapshot
	err := cleanUpSnapshot(ctx, parentVol, rbdSnap, rbdVol, cr)
	if err != nil {
		return status.Errorf(codes.Internal, "failed to remove partially cloned volume %q: %s", rbdVol, err)
	}
	// drop the journal reservation of the half-created volume
	err = undoVolReservation(ctx, rbdVol, cr)
	if err != nil {
		return status.Errorf(codes.Internal, "failed to remove volume %q from journal: %s", rbdVol, err)
	}
	// always report an error so the caller retries the CreateVolume request
	return status.Errorf(
		codes.Internal,
		"cloning thick-provisioned volume %q has been interrupted, please retry", rbdVol)
}
// check snapshots on the rbd image, as we have limit from krbd that an image
// cannot have more than 510 snapshot at a given point of time. If the
// snapshots are more than the `maxSnapshotsOnImage` Add a task to flatten all
@ -610,7 +596,16 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
}
}
log.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)
log.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol, rbdSnap)
// resize the volume if the size is different
// expand the image if the requested size is greater than the current size
err = rbdVol.expand()
if err != nil {
log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)
return err
}
return nil
}
@ -642,7 +637,6 @@ func (cs *ControllerServer) createBackingImage(
if err != nil {
return err
}
log.DebugLog(ctx, "created volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)
case parentVol != nil:
if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil {
log.ErrorLog(ctx, err.Error())
@ -661,7 +655,7 @@ func (cs *ControllerServer) createBackingImage(
}
}
log.DebugLog(ctx, "created volume %s backed by image %s", rbdVol.RequestName, rbdVol.RbdImageName)
log.DebugLog(ctx, "created image %s backed for request name %s", rbdVol, rbdVol.RequestName)
defer func() {
if err != nil {
@ -1030,7 +1024,7 @@ func (cs *ControllerServer) CreateSnapshot(
return nil, status.Error(codes.Internal, err.Error())
}
rbdSnap.RbdImageName = rbdVol.RbdImageName
rbdSnap.SizeBytes = rbdVol.VolSize
rbdSnap.VolSize = rbdVol.VolSize
rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
rbdSnap.RequestName = req.GetName()
@ -1163,7 +1157,7 @@ func cloneFromSnapshot(
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.SizeBytes,
SizeBytes: rbdSnap.VolSize,
SnapshotId: rbdSnap.VolID,
SourceVolumeId: rbdSnap.SourceVolumeID,
CreationTime: rbdSnap.CreatedAt,
@ -1209,7 +1203,7 @@ func (cs *ControllerServer) doSnapshotClone(
defer cloneRbd.Destroy()
// add image feature for cloneRbd
f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten}
cloneRbd.imageFeatureSet = librbd.FeatureSetFromNames(f)
cloneRbd.ImageFeatureSet = librbd.FeatureSetFromNames(f)
err := cloneRbd.Connect(cr)
if err != nil {
@ -1356,6 +1350,17 @@ func (cs *ControllerServer) DeleteSnapshot(
return &csi.DeleteSnapshotResponse{}, nil
}
// if the error is ErrImageNotFound, We need to cleanup the image from
// trash and remove the metadata in OMAP.
if errors.Is(err, ErrImageNotFound) {
err = cleanUpImageAndSnapReservation(ctx, rbdSnap, cr)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.DeleteSnapshotResponse{}, nil
}
return nil, status.Error(codes.Internal, err.Error())
}
@ -1379,28 +1384,14 @@ func (cs *ControllerServer) DeleteSnapshot(
}
defer rbdVol.Destroy()
err = rbdVol.getImageInfo()
rbdVol.ImageID = rbdSnap.ImageID
// update parent name to delete the snapshot
rbdSnap.RbdImageName = rbdVol.RbdImageName
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
if err != nil {
if errors.Is(err, ErrImageNotFound) {
err = rbdVol.ensureImageCleanup(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
} else {
log.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
log.ErrorLog(ctx, "failed to delete image: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
} else {
rbdVol.ImageID = rbdSnap.ImageID
// update parent name to delete the snapshot
rbdSnap.RbdImageName = rbdVol.RbdImageName
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
if err != nil {
log.ErrorLog(ctx, "failed to delete image: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
err = undoSnapReservation(ctx, rbdSnap, cr)
if err != nil {
@ -1413,6 +1404,39 @@ func (cs *ControllerServer) DeleteSnapshot(
return &csi.DeleteSnapshotResponse{}, nil
}
// cleanUpImageAndSnapReservation cleans up the image from the trash and
// removes the snapshot reservation from the rados OMAP. It is used when the
// backing image of a snapshot is gone (ErrImageNotFound) but metadata may
// still linger.
func cleanUpImageAndSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	rbdVol := generateVolFromSnap(rbdSnap)
	err := rbdVol.Connect(cr)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	defer rbdVol.Destroy()

	err = rbdVol.openIoctx()
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	// cleanup the image from trash if the error is image not found.
	err = rbdVol.ensureImageCleanup(ctx)
	if err != nil {
		// fix: the original format string had two verbs for three
		// arguments; log pool/name explicitly so err is printed too.
		log.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)

		return status.Error(codes.Internal, err.Error())
	}

	err = undoSnapReservation(ctx, rbdSnap, cr)
	if err != nil {
		// fix: err was passed without a matching verb; add %v so the
		// underlying failure is visible in the log.
		log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s): %v",
			rbdSnap.RequestName, rbdSnap, err)

		return status.Error(codes.Internal, err.Error())
	}

	return nil
}
// ControllerExpandVolume expand RBD Volumes on demand based on resizer request.
func (cs *ControllerServer) ControllerExpandVolume(
ctx context.Context,
@ -1447,9 +1471,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
rbdVol, err := GenVolFromVolID(ctx, volID, cr, req.GetSecrets())
defer rbdVol.Destroy()
rbdVol, err := genVolFromVolIDWithMigration(ctx, volID, cr, req.GetSecrets())
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
@ -1463,6 +1485,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
return nil, err
}
defer rbdVol.Destroy()
// NodeExpansion is needed for PersistentVolumes with,
// 1. Filesystem VolumeMode with & without Encryption and

View File

@ -77,35 +77,41 @@ func parseMigrationVolID(vh string) (*migrationVolID, error) {
// deleteMigratedVolume get rbd volume details from the migration volID
// and delete the volume from the cluster, return err if there was an error on the process.
func deleteMigratedVolume(ctx context.Context, parsedMigHandle *migrationVolID, cr *util.Credentials) error {
rv, err := genVolFromMigVolID(ctx, parsedMigHandle, cr)
if err != nil {
return err
}
defer rv.Destroy()
err = deleteImage(ctx, rv, cr)
if err != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s, err: %v", rv, err)
}
return err
}
// genVolFromMigVolID populate rbdVol struct from the migration volID.
func genVolFromMigVolID(ctx context.Context, migVolID *migrationVolID, cr *util.Credentials) (*rbdVolume, error) {
var err error
rv := &rbdVolume{}
// fill details to rv struct from parsed migration handle
rv.RbdImageName = parsedMigHandle.imageName
rv.Pool = parsedMigHandle.poolName
rv.ClusterID = parsedMigHandle.clusterID
rv.RbdImageName = migVolID.imageName
rv.Pool = migVolID.poolName
rv.ClusterID = migVolID.clusterID
rv.Monitors, err = util.Mons(util.CsiConfigFile, rv.ClusterID)
if err != nil {
log.ErrorLog(ctx, "failed to fetch monitors using clusterID: %s, err: %v", rv.ClusterID, err)
return err
return nil, err
}
// connect to the volume.
err = rv.Connect(cr)
if err != nil {
log.ErrorLog(ctx, "failed to get connected to the rbd image : %s, err: %v", rv.RbdImageName, err)
return err
}
defer rv.Destroy()
// if connected , delete it
err = deleteImage(ctx, rv, cr)
if err != nil {
log.ErrorLog(ctx, "failed to delete rbd image : %s, err: %v", rv.RbdImageName, err)
return err
return nil, err
}
return nil
return rv, nil
}

View File

@ -16,7 +16,11 @@ limitations under the License.
package rbd
import (
"context"
"fmt"
"time"
"github.com/ceph/ceph-csi/internal/util"
librbd "github.com/ceph/go-ceph/rbd"
)
@ -84,6 +88,35 @@ func (ri *rbdImage) promoteImage(force bool) error {
return nil
}
// forcePromoteImage promotes image to primary with force option with 1 minute
// timeout. If there is no response within 1 minute, the rbd CLI process will
// be killed and an error is returned.
func (rv *rbdVolume) forcePromoteImage(cr *util.Credentials) error {
	promoteArgs := []string{
		"mirror", "image", "promote",
		rv.String(),
		"--force",
		"--id", cr.ID,
		"-m", rv.Monitors,
		"--keyfile=" + cr.KeyFile,
	}
	// shell out to the rbd CLI (rather than using go-ceph) so the
	// promotion can be bounded by the 1-minute timeout below.
	_, stderr, err := util.ExecCommandWithTimeout(
		context.TODO(),
		time.Minute,
		"rbd",
		promoteArgs...,
	)
	if err != nil {
		return fmt.Errorf("failed to promote image %q with error: %w", rv, err)
	}

	// the CLI can exit 0 but still report a problem; treat any stderr
	// output as a failure.
	if stderr != "" {
		return fmt.Errorf("failed to promote image %q with stderror: %s", rv, stderr)
	}

	return nil
}
// demoteImage demotes image to secondary.
func (ri *rbdImage) demoteImage() error {
image, err := ri.open()

View File

@ -427,26 +427,15 @@ func (ns *NodeServer) stageTransaction(
}
transaction.isMounted = true
// resize if its fileSystemType static volume.
if staticVol && !isBlock {
var ok bool
resizer := mount.NewResizeFs(utilexec.New())
ok, err = resizer.NeedResize(devicePath, stagingTargetPath)
if err != nil {
return transaction, status.Errorf(codes.Internal,
"need resize check failed on devicePath %s and staingPath %s, error: %v",
devicePath,
stagingTargetPath,
err)
}
if ok {
ok, err = resizer.Resize(devicePath, stagingTargetPath)
if !ok {
return transaction, status.Errorf(codes.Internal,
"resize failed on path %s, error: %v", stagingTargetPath, err)
}
}
// As we are supporting the restore of a volume to a bigger size and
// creating bigger size clone from a volume, we need to check filesystem
// resize is required, if required resize filesystem.
// in case of encrypted block PVC resize only the LUKS device.
err = resizeNodeStagePath(ctx, isBlock, transaction, req.GetVolumeId(), stagingTargetPath)
if err != nil {
return transaction, err
}
if !readOnly {
// #nosec - allow anyone to write inside the target path
err = os.Chmod(stagingTargetPath, 0o777)
@ -455,6 +444,87 @@ func (ns *NodeServer) stageTransaction(
return transaction, err
}
// resizeNodeStagePath resizes the device if its encrypted and it also resizes
// the stagingTargetPath if filesystem needs resize. This supports restoring a
// volume to a bigger size or cloning to a bigger size than the source.
func resizeNodeStagePath(ctx context.Context,
	isBlock bool,
	transaction *stageTransaction,
	volID,
	stagingTargetPath string) error {
	var err error
	devicePath := transaction.devicePath
	var ok bool

	// if its a non encrypted block device we dont need any expansion
	if isBlock && !transaction.isEncrypted {
		return nil
	}

	resizer := mount.NewResizeFs(utilexec.New())

	if transaction.isEncrypted {
		// grow the LUKS mapping first (when needed); from here on operate
		// on the returned mapper device instead of the raw rbd device.
		devicePath, err = resizeEncryptedDevice(ctx, volID, stagingTargetPath, devicePath)
		if err != nil {
			return status.Error(codes.Internal, err.Error())
		}
	}
	// check stagingPath needs resize.
	ok, err = resizer.NeedResize(devicePath, stagingTargetPath)
	if err != nil {
		return status.Errorf(codes.Internal,
			"need resize check failed on devicePath %s and staingPath %s, error: %v",
			devicePath,
			stagingTargetPath,
			err)
	}
	// return nil if no resize is required
	if !ok {
		return nil
	}

	// grow the filesystem to span the (possibly expanded) device
	ok, err = resizer.Resize(devicePath, stagingTargetPath)
	if !ok {
		return status.Errorf(codes.Internal,
			"resize failed on path %s, error: %v", stagingTargetPath, err)
	}

	return nil
}
// resizeEncryptedDevice grows the active LUKS mapping for volID when the
// underlying rbd device has been expanded. It returns the mapper device
// path, which callers should use for subsequent resize operations.
func resizeEncryptedDevice(ctx context.Context, volID, stagingTargetPath, devicePath string) (string, error) {
	// size of the raw rbd device (e.g. /dev/rbd0)
	rbdDevSize, err := getDeviceSize(ctx, devicePath)
	if err != nil {
		return "", fmt.Errorf(
			"failed to get device size of %s and staingPath %s, error: %w",
			devicePath,
			stagingTargetPath,
			err)
	}
	_, mapperPath := util.VolumeMapper(volID)
	// size of the LUKS mapper device stacked on top of the rbd device
	encDevSize, err := getDeviceSize(ctx, mapperPath)
	if err != nil {
		return "", fmt.Errorf(
			"failed to get device size of %s and staingPath %s, error: %w",
			mapperPath,
			stagingTargetPath,
			err)
	}

	// if the rbd device `/dev/rbd0` size is greater than LUKS device size
	// we need to resize the LUKS device.
	if rbdDevSize > encDevSize {
		// The volume is encrypted, resize an active mapping
		err = util.ResizeEncryptedVolume(ctx, mapperPath)
		if err != nil {
			log.ErrorLog(ctx, "failed to resize device %s: %v",
				mapperPath, err)

			return "", fmt.Errorf(
				"failed to resize device %s: %w", mapperPath, err)
		}
	}

	return mapperPath, nil
}
func flattenImageBeforeMapping(
ctx context.Context,
volOptions *rbdVolume,
@ -1168,3 +1238,22 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
},
}, nil
}
// getDeviceSize gets the block device size in bytes, as reported by
// `blockdev --getsize64` for the given devicePath.
func getDeviceSize(ctx context.Context, devicePath string) (uint64, error) {
	output, _, err := util.ExecCommand(ctx, "blockdev", "--getsize64", devicePath)
	if err != nil {
		return 0, fmt.Errorf("blockdev %v returned an error: %w", devicePath, err)
	}

	// blockdev prints the size followed by a newline
	outStr := strings.TrimSpace(output)

	// fix: removed an unreachable duplicate `if err != nil` check here —
	// err was already handled above and TrimSpace cannot set it.
	size, err := strconv.ParseUint(outStr, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse size of device %s %s: %w", devicePath, outStr, err)
	}

	return size, nil
}

View File

@ -176,7 +176,7 @@ func checkSnapCloneExists(
// Code from here on, rolls the transaction forward.
rbdSnap.CreatedAt = vol.CreatedAt
rbdSnap.SizeBytes = vol.VolSize
rbdSnap.VolSize = vol.VolSize
// found a snapshot already available, process and return its information
rbdSnap.VolID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, snapData.ImagePoolID, rbdSnap.Pool,
rbdSnap.ClusterID, snapUUID, volIDVersion)

View File

@ -107,6 +107,9 @@ type rbdImage struct {
// identifying this rbd image
VolID string `json:"volID"`
// VolSize is the size of the RBD image backing this rbdImage.
VolSize int64
Monitors string
// JournalPool is the ceph pool in which the CSI Journal/CSI snapshot Journal is
// stored
@ -124,6 +127,12 @@ type rbdImage struct {
RequestName string
NamePrefix string
// ParentName represents the parent image name of the image.
ParentName string
// Parent Pool is the pool that contains the parent image.
ParentPool string
ImageFeatureSet librbd.FeatureSet
// encryption provides access to optional VolumeEncryption functions
encryption *util.VolumeEncryption
// Owner is the creator (tenant, Kubernetes Namespace) of the volume
@ -148,23 +157,21 @@ type rbdVolume struct {
Topology map[string]string
// DataPool is where the data for images in `Pool` are stored, this is used as the `--data-pool`
// argument when the pool is created, and is not used anywhere else
DataPool string
ParentName string
// Parent Pool is the pool that contains the parent image.
ParentPool string
imageFeatureSet librbd.FeatureSet
AdminID string `json:"adminId"`
UserID string `json:"userId"`
Mounter string `json:"mounter"`
DataPool string
AdminID string
UserID string
Mounter string
ReservedID string
MapOptions string
UnmapOptions string
LogDir string
LogStrategy string
VolName string `json:"volName"`
MonValueFromSecret string `json:"monValueFromSecret"`
VolSize int64 `json:"volSize"`
DisableInUseChecks bool `json:"disableInUseChecks"`
VolName string
MonValueFromSecret string
// RequestedVolSize has the size of the volume requested by the user and
// this value will not be updated when doing getImageInfo() on rbdVolume.
RequestedVolSize int64
DisableInUseChecks bool
readOnly bool
Primary bool
ThickProvision bool
@ -179,7 +186,6 @@ type rbdSnapshot struct {
SourceVolumeID string
ReservedID string
RbdSnapName string
SizeBytes int64
}
// imageFeature represents required image features and value.
@ -345,10 +351,10 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
}
}
log.DebugLog(ctx, logMsg,
pOpts, volSzMiB, pOpts.imageFeatureSet.Names(), pOpts.Monitors)
pOpts, volSzMiB, pOpts.ImageFeatureSet.Names(), pOpts.Monitors)
if pOpts.imageFeatureSet != 0 {
err := options.SetUint64(librbd.RbdImageOptionFeatures, uint64(pOpts.imageFeatureSet))
if pOpts.ImageFeatureSet != 0 {
err := options.SetUint64(librbd.RbdImageOptionFeatures, uint64(pOpts.ImageFeatureSet))
if err != nil {
return fmt.Errorf("failed to set image features: %w", err)
}
@ -921,7 +927,7 @@ func (rv *rbdVolume) flatten() error {
}
func (rv *rbdVolume) hasFeature(feature uint64) bool {
return (uint64(rv.imageFeatureSet) & feature) == feature
return (uint64(rv.ImageFeatureSet) & feature) == feature
}
func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint64) (bool, error) {
@ -1051,9 +1057,33 @@ func genSnapFromSnapID(
}
}
err = updateSnapshotDetails(rbdSnap)
if err != nil {
return fmt.Errorf("failed to update snapshot details for %q: %w", rbdSnap, err)
}
return err
}
// updateSnapshotDetails copies details from the backing rbdVolume into the
// rbdSnapshot; currently only the image size is propagated.
func updateSnapshotDetails(rbdSnap *rbdSnapshot) error {
	snapVol := generateVolFromSnap(rbdSnap)
	if err := snapVol.Connect(rbdSnap.conn.Creds); err != nil {
		return err
	}
	defer snapVol.Destroy()

	// refresh VolSize (and other image attributes) from the cluster
	if err := snapVol.getImageInfo(); err != nil {
		return err
	}
	rbdSnap.VolSize = snapVol.VolSize

	return nil
}
// generateVolumeFromVolumeID generates a rbdVolume structure from the provided identifier.
func generateVolumeFromVolumeID(
ctx context.Context,
@ -1287,7 +1317,7 @@ func genVolFromVolumeOptions(
ctx,
"setting disableInUseChecks: %t image features: %v mounter: %s",
disableInUseChecks,
rbdVol.imageFeatureSet.Names(),
rbdVol.ImageFeatureSet.Names(),
rbdVol.Mounter)
rbdVol.DisableInUseChecks = disableInUseChecks
@ -1325,7 +1355,7 @@ func (rv *rbdVolume) validateImageFeatures(imageFeatures string) error {
return fmt.Errorf("feature %s requires rbd-nbd for mounter", f)
}
}
rv.imageFeatureSet = librbd.FeatureSetFromNames(arr)
rv.ImageFeatureSet = librbd.FeatureSetFromNames(arr)
return nil
}
@ -1358,7 +1388,7 @@ func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[
// hasSnapshotFeature checks if Layering is enabled for this image.
func (rv *rbdVolume) hasSnapshotFeature() bool {
return (uint64(rv.imageFeatureSet) & librbd.FeatureLayering) == librbd.FeatureLayering
return (uint64(rv.ImageFeatureSet) & librbd.FeatureLayering) == librbd.FeatureLayering
}
func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
@ -1422,10 +1452,10 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
}
log.DebugLog(ctx, logMsg,
pSnapOpts, rv, rv.imageFeatureSet.Names(), rv.Monitors)
pSnapOpts, rv, rv.ImageFeatureSet.Names(), rv.Monitors)
if rv.imageFeatureSet != 0 {
err = options.SetUint64(librbd.RbdImageOptionFeatures, uint64(rv.imageFeatureSet))
if rv.ImageFeatureSet != 0 {
err = options.SetUint64(librbd.RbdImageOptionFeatures, uint64(rv.ImageFeatureSet))
if err != nil {
return fmt.Errorf("failed to set image features: %w", err)
}
@ -1473,6 +1503,12 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
}
}
// get image latest information
err = rv.getImageInfo()
if err != nil {
return fmt.Errorf("failed to get image info of %s: %w", rv, err)
}
// Success! Do not delete the cloned image now :)
deleteClone = false
@ -1499,7 +1535,7 @@ func (rv *rbdVolume) getImageInfo() error {
if err != nil {
return err
}
rv.imageFeatureSet = librbd.FeatureSet(features)
rv.ImageFeatureSet = librbd.FeatureSet(features)
// Get parent information.
parentInfo, err := image.GetParent()
@ -1675,6 +1711,16 @@ func cleanupRBDImageMetadataStash(metaDataPath string) error {
return nil
}
// expand resizes the image to RequestedVolSize when it differs from the
// current VolSize; when both sizes already match it is a no-op.
func (rv *rbdVolume) expand() error {
	if rv.RequestedVolSize != rv.VolSize {
		return rv.resize(rv.RequestedVolSize)
	}

	return nil
}
// resize the given volume to new size.
// updates Volsize of rbdVolume object to newSize in case of success.
func (rv *rbdVolume) resize(newSize int64) error {
@ -2071,3 +2117,22 @@ func strategicActionOnLogFile(ctx context.Context, logStrategy, logFile string)
log.ErrorLog(ctx, "unknown cephLogStrategy option %q: hint: 'remove'|'compress'|'preserve'", logStrategy)
}
}
// genVolFromVolIDWithMigration populate a rbdVol structure based on the volID
// format: migration-style IDs are parsed by the migration helpers, everything
// else goes through the regular journal-backed lookup.
func genVolFromVolIDWithMigration(
	ctx context.Context, volID string, cr *util.Credentials, secrets map[string]string) (*rbdVolume, error) {
	// migration volume IDs encode cluster/pool/image inline
	if isMigrationVolID(volID) {
		pmVolID, pErr := parseMigrationVolID(volID)
		if pErr != nil {
			return nil, pErr
		}

		return genVolFromMigVolID(ctx, pmVolID, cr)
	}
	rv, err := GenVolFromVolID(ctx, volID, cr, secrets)
	if err != nil {
		// NOTE(review): rv is destroyed here but still returned with err;
		// this presumably relies on GenVolFromVolID returning a non-nil
		// rbdVolume on failure — confirm callers never use rv when
		// err != nil.
		rv.Destroy()
	}

	return rv, err
}

View File

@ -41,7 +41,7 @@ func TestHasSnapshotFeature(t *testing.T) {
rv := rbdVolume{}
for _, test := range tests {
rv.imageFeatureSet = librbd.FeatureSetFromNames(strings.Split(test.features, ","))
rv.ImageFeatureSet = librbd.FeatureSetFromNames(strings.Split(test.features, ","))
if got := rv.hasSnapshotFeature(); got != test.hasFeature {
t.Errorf("hasSnapshotFeature(%s) = %t, want %t", test.features, got, test.hasFeature)
}

View File

@ -324,7 +324,7 @@ func createDummyImage(ctx context.Context, rbdVol *rbdVolume) error {
librbd.FeatureNameFastDiff,
}
features := librbd.FeatureSetFromNames(f)
dummyVol.imageFeatureSet = features
dummyVol.ImageFeatureSet = features
// create 1MiB dummy image. 1MiB=1048576 bytes
dummyVol.VolSize = 1048576
err = createImage(ctx, &dummyVol, dummyVol.conn.Creds)
@ -557,7 +557,13 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
// promote secondary to primary
if !mirroringInfo.Primary {
err = rbdVol.promoteImage(req.Force)
if req.GetForce() {
// workaround for https://github.com/ceph/ceph-csi/issues/2736
// TODO: remove this workaround when the issue is fixed
err = rbdVol.forcePromoteImage(cr)
} else {
err = rbdVol.promoteImage(req.GetForce())
}
if err != nil {
log.ErrorLog(ctx, err.Error())
// In case of the DR the image on the primary site cannot be

View File

@ -64,17 +64,6 @@ func createRBDClone(
return err
}
err = cloneRbdVol.getImageInfo()
if err != nil {
log.ErrorLog(ctx, "failed to get rbd image: %s details with error: %v", cloneRbdVol, err)
delErr := deleteImage(ctx, cloneRbdVol, cr)
if delErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
}
return err
}
return nil
}

View File

@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"os/exec"
"time"
"github.com/ceph/ceph-csi/internal/util/log"
@ -65,6 +66,59 @@ func ExecCommand(ctx context.Context, program string, args ...string) (string, s
return stdout, stderr, nil
}
// ExecCommandWithTimeout executes passed in program with args, timeout and
// returns separate stdout and stderr streams. If the command is not executed
// within given timeout, the process will be killed. In case ctx is not set to
// context.TODO(), the command will be logged after it was executed.
//
// NOTE(review): ctx is consulted only to decide whether to log; the command
// itself runs under a fresh context derived from context.Background() with
// the given timeout, so cancelling ctx does not stop the process — confirm
// this is intended.
func ExecCommandWithTimeout(
	ctx context.Context,
	timeout time.Duration,
	program string,
	args ...string) (
	string,
	string,
	error) {
	var (
		// secrets are stripped so they never reach the logs
		sanitizedArgs = StripSecretInArgs(args)
		stdoutBuf     bytes.Buffer
		stderrBuf     bytes.Buffer
	)

	// bound the command's lifetime; CommandContext kills the process when
	// cctx expires.
	cctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(cctx, program, args...) // #nosec:G204, commands executing not vulnerable.
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf

	err := cmd.Run()
	stdout := stdoutBuf.String()
	stderr := stderrBuf.String()
	if err != nil {
		// if its a timeout log return context deadline exceeded error message
		if errors.Is(cctx.Err(), context.DeadlineExceeded) {
			err = fmt.Errorf("timeout: %w", cctx.Err())
		}
		err = fmt.Errorf("an error (%w) and stderror (%s) occurred while running %s args: %v",
			err,
			stderr,
			program,
			sanitizedArgs)

		if ctx != context.TODO() {
			log.ErrorLog(ctx, "%s", err)
		}

		return stdout, stderr, err
	}

	if ctx != context.TODO() {
		log.UsefulLog(ctx, "command succeeded: %s %v", program, sanitizedArgs)
	}

	return stdout, stderr, nil
}
// GetPoolID fetches the ID of the pool that matches the passed in poolName
// parameter.
func GetPoolID(monitors string, cr *Credentials, poolName string) (int64, error) {

View File

@ -0,0 +1,89 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"errors"
"testing"
"time"
)
// TestExecCommandWithTimeout verifies that ExecCommandWithTimeout captures
// stdout for a command that finishes in time, and reports a (wrapped)
// context.DeadlineExceeded error when the command outlives the timeout.
func TestExecCommandWithTimeout(t *testing.T) {
	t.Parallel()
	type args struct {
		ctx     context.Context
		program string
		timeout time.Duration
		args    []string
	}
	tests := []struct {
		name        string
		args        args
		stdout      string
		expectedErr error
		wantErr     bool
	}{
		{
			// fast command: completes well within the timeout
			name: "echo hello",
			args: args{
				ctx:     context.TODO(),
				program: "echo",
				timeout: time.Second,
				args:    []string{"hello"},
			},
			stdout:      "hello\n",
			expectedErr: nil,
			wantErr:     false,
		},
		{
			// slow command: sleeps past the timeout and must be killed
			name: "sleep with timeout",
			args: args{
				ctx:     context.TODO(),
				program: "sleep",
				timeout: time.Second,
				args:    []string{"3"},
			},
			stdout:      "",
			expectedErr: context.DeadlineExceeded,
			wantErr:     true,
		},
	}
	for _, tt := range tests {
		// capture the range variable for the parallel subtest
		newtt := tt
		t.Run(newtt.name, func(t *testing.T) {
			t.Parallel()
			stdout, _, err := ExecCommandWithTimeout(newtt.args.ctx,
				newtt.args.timeout,
				newtt.args.program,
				newtt.args.args...)
			if (err != nil) != newtt.wantErr {
				t.Errorf("ExecCommandWithTimeout() error = %v, wantErr %v", err, newtt.wantErr)

				return
			}
			// errors.Is unwraps the "timeout: %w" wrapping applied by
			// ExecCommandWithTimeout
			if newtt.wantErr && !errors.Is(err, newtt.expectedErr) {
				t.Errorf("ExecCommandWithTimeout() error expected got = %v, want %v", err, newtt.expectedErr)
			}
			if stdout != newtt.stdout {
				t.Errorf("ExecCommandWithTimeout() got = %v, want %v", stdout, newtt.stdout)
			}
		})
	}
}

View File

@ -46,7 +46,7 @@ RUN source /build.env \
&& mkdir -p ${GOROOT} \
&& curl https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz \
| tar xzf - -C ${GOROOT} --strip-components=1 \
&& curl -sf "https://install.goreleaser.com/github.com/golangci/golangci-lint.sh" \
&& curl -sf "https://raw.githubusercontent.com/golangci/golangci-lint/${GOLANGCI_VERSION}/install.sh" \
| bash -s -- -b ${GOPATH}/bin "${GOLANGCI_VERSION}" \
&& curl -L https://git.io/get_helm.sh | bash -s -- --version "${HELM_VERSION}" \
&& mkdir /opt/commitlint && pushd /opt/commitlint \