mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-18 02:50:30 +00:00)

commit 39b1f2b4d3
parent eee49a6e0a

cleanup: fix mispell words

fixed mispell words in the repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
@@ -179,7 +179,7 @@ containerized-test: .container-cmd .test-container-id
 $(CONTAINER_CMD) run --rm -v $(CURDIR):/go/src/github.com/ceph/ceph-csi$(SELINUX_VOL_FLAG) $(CSI_IMAGE_NAME):test make $(TARGET) GIT_SINCE=$(GIT_SINCE) REBASE=$(REBASE) CONTAINERIZED=yes

 ifeq ($(USE_PULLED_IMAGE),no)
-# create a (cached) container image with dependencied for building cephcsi
+# create a (cached) container image with dependencies for building cephcsi
 .devel-container-id: .container-cmd scripts/Dockerfile.devel
 [ ! -f .devel-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):devel
 $(CONTAINER_CMD) build $(CPUSET) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t $(CSI_IMAGE_NAME):devel -f ./scripts/Dockerfile.devel .
@@ -191,7 +191,7 @@ else
 endif

 ifeq ($(USE_PULLED_IMAGE),no)
-# create a (cached) container image with dependencied for testing cephcsi
+# create a (cached) container image with dependencies for testing cephcsi
 .test-container-id: .container-cmd build.env scripts/Dockerfile.test
 [ ! -f .test-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):test
 $(CONTAINER_CMD) build $(CPUSET) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test .
@@ -56,7 +56,7 @@ spec:
 - "--drivername=cephfs.csi.ceph.com"
 # If topology based provisioning is desired, configure required
 # node labels representing the nodes topology domain
-# and pass the label names below, for CSI to consume and advertize
+# and pass the label names below, for CSI to consume and advertise
 # its equivalent topology domain
 # - "--domainlabels=failure-domain/region,failure-domain/zone"
 env:
@@ -57,7 +57,7 @@ spec:
 - "--drivername=rbd.csi.ceph.com"
 # If topology based provisioning is desired, configure required
 # node labels representing the nodes topology domain
-# and pass the label names below, for CSI to consume and advertize
+# and pass the label names below, for CSI to consume and advertise
 # its equivalent topology domain
 # - "--domainlabels=failure-domain/region,failure-domain/zone"
 env:
@@ -17,7 +17,7 @@
 Refer [external-snapshotter](https://github.com/kubernetes-csi/external-snapshotter/)
 for more information on these sidecar controllers. There should
 be a `volumesnapshotclass` object present in the cluster
-for snapshot request to be satisified.
+for snapshot request to be satisfied.

 - To install snapshot controller and CRD

@@ -77,7 +77,7 @@ import (
 less noisy if the dollar signs are omitted. Especially when the
 command doesn't list the output, but if the command follows output
 we can use '$ ' (dollar+space) mainly to differentiate between
-command and its ouput.
+command and its output.

 scenario 1: when command doesn't follow output

@@ -23,7 +23,7 @@ snapshot, Restore RBD snapshot and Create new RBD image from existing RBD image.
 ## Create a snapshot from PVC

 Refer [snapshot](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
-for more information realted to Volume cloning in kubernetes.
+for more information related to Volume cloning in kubernetes.

 ### steps to create a snapshot

@@ -44,7 +44,7 @@ for more information realted to Volume cloning in kubernetes.
 rbd snap ls <RBD image for src k8s volume> --all

 // If the parent has more snapshots than the configured `maxsnapshotsonimage`
-// add backgound tasks to flatten the temporary cloned images (temporary cloned
+// add background tasks to flatten the temporary cloned images (temporary cloned
 // image names will be same as snapshot names)
 ceph rbd task add flatten <RBD image for temporary snap images>

@@ -125,7 +125,7 @@ image(this will be applicable for both normal image and cloned image)

 Refer
 [volume-cloning](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/)
-for more information realted to Volume cloning in kubernetes.
+for more information related to Volume cloning in kubernetes.

 ### steps to create a Volume from Volume

@@ -21,7 +21,7 @@ it is **highly** encouraged to:
 to set it to `1` as we need to build with go-ceph bindings.
 * `GO111MODULE` is enabled by default, if `GO111MODULE` is set to `off` we need
 to set it to `on` as cephcsi uses go modules for dependency.
-* Ceph-CSI uses the native Ceph libaries through the [go-ceph
+* Ceph-CSI uses the native Ceph libraries through the [go-ceph
 package](https://github.com/ceph/go-ceph). It is required to install the
 Ceph C headers in order to compile Ceph-CSI. The packages are called
 `libcephfs-devel`, `librados-devel` and `librbd-devel` on many Linux
@@ -97,7 +97,7 @@ make containerized-test TARGET=static-check
 ```

 In addition to running tests locally, each Pull Request that is created will
-trigger Continous Integration tests that include the `containerized-test`, but
+trigger Continuous Integration tests that include the `containerized-test`, but
 also additional functionality tests that are defined under the `e2e/`
 directory.

@@ -10,7 +10,7 @@
 - [Create CephFS subvolume](#create-cephfs-subvolume)
 - [Create CephFS static PV](#create-cephfs-static-pv)
 - [Node stage secret ref in CephFS PV](#node-stage-secret-ref-in-cephfs-pv)
-- [CephFS Volume Attributes in PV](#cephfs-volume-attributes-in-pv)
+- [CephFS volume attributes in PV](#cephfs-volume-attributes-in-pv)
 - [Create CephFS static PVC](#create-cephfs-static-pvc)

 This document outlines how to create static PV and static PVC from
@@ -78,12 +78,12 @@ spec:
 Below table explains the list of volume attributes can be set when creating a
 static RBD PV

 | Attributes | Description | Required |
-| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
+| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
 | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created duing ceph-csi deployment) | Yes |
 | pool | The pool name in which rbd image is created | Yes |
 | staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | yes |
 | mounter | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images | No |

 **Note** ceph-csi does not supports rbd image deletion for static PV.
 `persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV
@@ -122,7 +122,7 @@ $ kubectl create -f fs-static-pvc.yaml
 persistentvolumeclaim/fs-static-pvc created
 ```

-**Note** deleting PV and PVC doesnot deleted the backend rbd image, user need to
+**Note** deleting PV and PVC does not removed the backend rbd image, user need to
 manually delete the rbd image if required

 ## CephFS static PVC
@@ -201,12 +201,12 @@ Format for the secret should be same as detailed [here](https://github.com/ceph/
 Below table explains the list of volume attributes can be set when creating a
 static CephFS PV

 | Attributes | Description | Required |
-| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
+| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
 | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created duing ceph-csi deployment) | Yes |
 | fsName | CephFS filesystem name into which the subvolume should be created/present | Yes |
 | staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes |
 | rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes |

 **Note** ceph-csi does not supports CephFS subvolume deletion for static PV.
 `persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV
@@ -271,7 +271,7 @@ var _ = Describe("RBD", func() {
 By("create a PVC and bind it to an app with normal user", func() {
 err := validateNormalUserPVCAccess(pvcPath, f)
 if err != nil {
-e2elog.Failf("failed to validate normal user pvc and application bidning with error %v", err)
+e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err)
 }
 // validate created backend rbd images
 validateRBDImageCount(f, 0)
@@ -85,7 +85,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
 pvcName = "pvc-name"
 namespace = f.UniqueName
 // minikube creates default class in cluster, we need to set dummy
-// storageclass on PV and PVC to avoid storageclass name missmatch
+// storageclass on PV and PVC to avoid storageclass name mismatch
 sc = "storage-class"
 )

@@ -176,7 +176,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
 pvcName = "pvc-name"
 namespace = f.UniqueName
 // minikube creates default storage class in cluster, we need to set dummy
-// storageclass on PV and PVC to avoid storageclass name missmatch
+// storageclass on PV and PVC to avoid storageclass name mismatch
 sc = "storage-class"
 secretName = "cephfs-static-pv-sc" // #nosec
 )
@@ -354,7 +354,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 // wait for application pod to come up after resize
 err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 if err != nil {
-e2elog.Failf("timout waiting for pod to be in running state with error %v", err)
+e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
 }
 // validate if resize is successful.
 err = checkDirSize(app, f, &opt, pvcExpandSize)
@@ -69,7 +69,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 util.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
 }
 if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil {
-// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
+// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 // in that case we are safe and we could discard this error and we are good to go
 // ahead with deletion
 if !errors.Is(err, ErrSnapProtectionExist) {
@@ -118,7 +118,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 }
 // As we completed clone, remove the intermediate snap
 if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil {
-// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
+// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 // in that case we are safe and we could discard this error and we are good to go
 // ahead with deletion
 if !errors.Is(err, ErrSnapProtectionExist) {
@@ -185,7 +185,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 // while cloning the volume the size is not populated properly to the new volume now.
 // it will be fixed in cephfs soon with the parentvolume size. Till then by below
 // resize we are making sure we return or satisfy the requested size by setting the size
-// explictly
+// explicitly
 err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
 if err != nil {
 purgeErr := purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions, false)
@@ -207,7 +207,7 @@ func unprotectSnapshot(ctx context.Context, volOptions *volumeOptions, cr *util.
 "ceph",
 args[:]...)
 if err != nil {
-// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
+// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 // in that case we are safe and we could discard this error.
 if strings.Contains(err.Error(), snapProtectionExist) {
 return nil
@@ -152,7 +152,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
 func parseTime(ctx context.Context, createTime string) (*timestamp.Timestamp, error) {
 tm := &timestamp.Timestamp{}
 layout := "2006-01-02 15:04:05.000000"
-// TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failng
+// TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failing
 var t time.Time
 t, err := time.Parse(layout, createTime)
 if err != nil {
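The layout string in this hunk is a Go reference-time pattern for the timestamp that ceph fs reports. A minimal, self-contained sketch (not ceph-csi code; the input value is invented for illustration) of parsing such a string and converting it to the protobuf timestamp type that `parseTime` returns:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Layout copied from the hunk above; the createTime value is made up.
	layout := "2006-01-02 15:04:05.000000"
	createTime := "2020-08-11 09:12:33.123456"

	t, err := time.Parse(layout, createTime)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	// Convert to the protobuf timestamp type used in the hunk above.
	ts, err := ptypes.TimestampProto(t)
	if err != nil {
		fmt.Println("convert failed:", err)
		return
	}
	fmt.Println(ts.GetSeconds(), ts.GetNanos())
}
```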
@@ -96,7 +96,7 @@ func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (
 if strings.HasPrefix(err.Error(), volumeNotFound) {
 return nil, ErrVolumeNotFound
 }
-// Incase the error is other than invalid command return error to the caller.
+// In case the error is other than invalid command return error to the caller.
 if !strings.Contains(err.Error(), invalidCommand) {
 return nil, ErrInvalidCommand
 }
@@ -202,7 +202,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
 clusterAdditionalInfo[vo.ClusterID].resizeSupported = true
 return nil
 }
-// Incase the error is other than invalid command return error to the caller.
+// In case the error is other than invalid command return error to the caller.
 if !strings.Contains(err.Error(), invalidCommand) {
 util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
 return err
@@ -66,7 +66,7 @@ func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetIn
 }, nil
 }

-// NodeGetCapabilities returns RPC unknow capability.
+// NodeGetCapabilities returns RPC unknown capability.
 func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
 util.TraceLog(ctx, "Using default NodeGetCapabilities")

@@ -116,7 +116,7 @@ func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.No

 if err != nil {
 if os.IsNotExist(err) {
-return nil, status.Errorf(codes.InvalidArgument, "targetpath %s doesnot exist", targetPath)
+return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
 }
 return nil, err
 }
@@ -73,7 +73,7 @@ When a volume create request is received (or a snapshot create, the snapshot is
 it is used to read its references to reach the UUID that backs this VolName, to check if the
 UUID based volume can satisfy the requirements for the request
 - If during the process of checking the same, it is found that some linking information is stale
-or missing, the corresponding keys upto the key in the csiDirectory is cleaned up, to start afresh
+or missing, the corresponding keys up to the key in the csiDirectory is cleaned up, to start afresh

 - If the key with the CO VolName is not found, or was cleaned up, the request is treated as a
 new create request, and an CephUUIDDirectory is created first with a generated uuid, this ensures
@@ -110,7 +110,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
 }
 return true, nil
 }
-// as the temp clone doesnot exists,check snapshot exists on parent volume
+// as the temp clone does not exist,check snapshot exists on parent volume
 // snapshot name is same as temporary clone image
 snap.RbdImageName = tempClone.RbdImageName
 err = parentVol.checkSnapExists(snap)
@@ -545,7 +545,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u
 if !errors.Is(err, ErrSnapNotFound) {
 return nil, nil, status.Error(codes.Internal, err.Error())
 }
-return nil, nil, status.Errorf(codes.NotFound, "%s snapshot doesnot exists", snapshotID)
+return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
 }
 return nil, rbdSnap, nil
 case *csi.VolumeContentSource_Volume:
@@ -564,7 +564,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u
 if !errors.Is(err, ErrImageNotFound) {
 return nil, nil, status.Error(codes.Internal, err.Error())
 }
-return nil, nil, status.Errorf(codes.NotFound, "%s image doesnot exists", volID)
+return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
 }
 return rbdvol, nil, nil
 }
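Both of the hunks above return gRPC NotFound statuses. A hedged sketch of how a caller or test could tell those apart from other failures; `handleCreateVolumeError` and the volume ID are hypothetical, only `status.Code`, `status.Errorf` and `codes.NotFound` are real grpc-go API:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handleCreateVolumeError is a hypothetical helper distinguishing the
// NotFound status produced above from other gRPC errors.
func handleCreateVolumeError(err error) {
	switch status.Code(err) {
	case codes.OK:
		fmt.Println("no error")
	case codes.NotFound:
		fmt.Println("source snapshot or image does not exist:", err)
	default:
		fmt.Println("unexpected failure:", err)
	}
}

func main() {
	// The error message mirrors the hunk above; the volume ID is made up.
	err := status.Errorf(codes.NotFound, "%s image does not exist", "csi-vol-example")
	handleCreateVolumeError(err)
}
```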
@@ -492,7 +492,7 @@ func (rv *rbdVolume) flattenRbdImage(ctx context.Context, cr *util.Credentials,
 supported, err := addRbdManagerTask(ctx, rv, args)
 if supported {
 if err != nil {
-// discard flattening error if the image doesnot have any parent
+// discard flattening error if the image does not have any parent
 rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
 if strings.Contains(err.Error(), rbdFlattenNoParent) {
 return nil
@@ -55,7 +55,7 @@ func (e errorPair) Unwrap() error {
 }

 // JoinErrors combines two errors. Of the returned error, Is() follows the first
-// branch, Unwrap() folllows the second branch.
+// branch, Unwrap() follows the second branch.
 func JoinErrors(e1, e2 error) error {
 return errorPair{e1, e2}
 }
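A small self-contained sketch of the semantics the comment describes: `errors.Is` matches along the first branch via `Is()`, and reaches the second branch via `Unwrap()`. The `pair` type and the two sentinel errors below are illustrative, not the repository's `errorPair`:

```go
package main

import (
	"errors"
	"fmt"
)

// pair mimics the documented behaviour of JoinErrors: Is() follows the first
// error, Unwrap() follows the second.
type pair struct {
	first, second error
}

func (p pair) Error() string        { return p.first.Error() + ": " + p.second.Error() }
func (p pair) Is(target error) bool { return errors.Is(p.first, target) }
func (p pair) Unwrap() error        { return p.second }

var (
	errPublic   = errors.New("volume not found")
	errInternal = errors.New("rados: object missing")
)

func main() {
	err := pair{errPublic, errInternal}
	fmt.Println(errors.Is(err, errPublic))   // true, matched via Is() on the first branch
	fmt.Println(errors.Is(err, errInternal)) // true, reached via Unwrap() on the second branch
}
```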
@@ -34,7 +34,7 @@ import (
 "k8s.io/utils/mount"
 )

-// RoundOffVolSize rounds up given quantity upto chunks of MiB/GiB.
+// RoundOffVolSize rounds up given quantity up to chunks of MiB/GiB.
 func RoundOffVolSize(size int64) int64 {
 size = RoundOffBytes(size)
 // convert size back to MiB for rbd CLI
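A hedged sketch of the rounding the comments above describe: a byte count rounded up to a whole MiB and returned in MiB, since the rbd CLI takes sizes in MiB. This only illustrates the idea; per the comment, the repository's `RoundOffBytes`/`RoundOffVolSize` also handle GiB-sized chunks.

```go
package main

import "fmt"

// roundOffVolSizeMiB is an illustrative helper, not the ceph-csi function:
// it rounds a byte count up to a whole number of MiB and returns it in MiB.
func roundOffVolSizeMiB(sizeBytes int64) int64 {
	const miB = int64(1 << 20)
	return (sizeBytes + miB - 1) / miB // ceiling division to whole MiB
}

func main() {
	fmt.Println(roundOffVolSizeMiB(1))         // 1 (anything up to 1 MiB rounds to 1 MiB)
	fmt.Println(roundOffVolSizeMiB(3<<20 + 1)) // 4 (just over 3 MiB rounds to 4 MiB)
}
```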
@@ -147,7 +147,7 @@ func GetKernelVersion() (string, error) {
 return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil
 }

-// KernelVersion holds kernel related informations.
+// KernelVersion holds kernel related information.
 type KernelVersion struct {
 Version int
 PatchLevel int
@@ -27,7 +27,7 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 // validate stagingpath exists
 ok := checkDirExists(req.GetStagingTargetPath())
 if !ok {
-return status.Errorf(codes.InvalidArgument, "staging path %s does not exists on node", req.GetStagingTargetPath())
+return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath())
 }
 return nil
 }
@@ -130,7 +130,7 @@ func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) {
 nextFieldStartIdx := (10 + clusterIDLength + 1)

 // minLenToDecode is now 17 as composedCSIID should include
-// atleast 16 for poolID encoding and 1 for '-' separator.
+// at least 16 for poolID encoding and 1 for '-' separator.
 const minLenToDecode = 17
 if bytesToProcess < minLenToDecode {
 return errors.New("failed to decode CSI identifier, string underflow")
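A hedged sketch of the length check this comment explains: after the earlier fields are consumed, at least 17 bytes must remain, one for the '-' separator plus 16 hex characters encoding the poolID. The `decodePoolID` helper and the exact field ordering are assumptions for illustration, not the ceph-csi decoder.

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// decodePoolID is a hypothetical helper: it enforces the minimum remaining
// length and parses the 16 hex characters of the poolID after the separator.
func decodePoolID(remaining string) (int64, error) {
	const minLenToDecode = 17
	if len(remaining) < minLenToDecode {
		return 0, errors.New("failed to decode CSI identifier, string underflow")
	}
	// skip the '-' separator, then parse the 16 hex characters of the poolID
	id, err := strconv.ParseUint(remaining[1:minLenToDecode], 16, 64)
	return int64(id), err
}

func main() {
	fmt.Println(decodePoolID("-0000000000000002")) // 2 <nil>
	fmt.Println(decodePoolID("-too-short"))        // 0 and a string underflow error
}
```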
@@ -25,12 +25,12 @@ DriverInfo:
 RequiredMountOption:
 rw: {}

-# Optional list of access modes required for provisiong. Default is RWO
+# Optional list of access modes required for provisioning. Default is RWO
 # RequiredAcccessModes:

 # Map that represents the capabilities the driver supports
 Capabilities:
-# Data is persistest accross pod restarts
+# Data is persistest across pod restarts
 persistence: true

 # Volume ownership via fsGroup
@@ -30,12 +30,12 @@ DriverInfo:
 RequiredMountOption:
 rw: {}

-# Optional list of access modes required for provisiong. Default is RWO
+# Optional list of access modes required for provisioning. Default is RWO
 # RequiredAcccessModes:

 # Map that represents the capabilities the driver supports
 Capabilities:
-# Data is persistest accross pod restarts
+# Data is persistest across pod restarts
 persistence: true

 # Volume ownership via fsGroup
@@ -48,7 +48,7 @@ kubectl_retry() {
 ret=$(grep -cvw 'AlreadyExists' "${stderr}")
 if [ "${ret}" -eq 0 ]
 then
-# Succes! stderr is empty after removing all "AlreadyExists" lines.
+# Success! stderr is empty after removing all "AlreadyExists" lines.
 break
 fi
 fi