diff --git a/Makefile b/Makefile index 1c841a810..24af6a0b9 100644 --- a/Makefile +++ b/Makefile @@ -179,7 +179,7 @@ containerized-test: .container-cmd .test-container-id $(CONTAINER_CMD) run --rm -v $(CURDIR):/go/src/github.com/ceph/ceph-csi$(SELINUX_VOL_FLAG) $(CSI_IMAGE_NAME):test make $(TARGET) GIT_SINCE=$(GIT_SINCE) REBASE=$(REBASE) CONTAINERIZED=yes ifeq ($(USE_PULLED_IMAGE),no) -# create a (cached) container image with dependencied for building cephcsi +# create a (cached) container image with dependencies for building cephcsi .devel-container-id: .container-cmd scripts/Dockerfile.devel [ ! -f .devel-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):devel $(CONTAINER_CMD) build $(CPUSET) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t $(CSI_IMAGE_NAME):devel -f ./scripts/Dockerfile.devel . @@ -191,7 +191,7 @@ else endif ifeq ($(USE_PULLED_IMAGE),no) -# create a (cached) container image with dependencied for testing cephcsi +# create a (cached) container image with dependencies for testing cephcsi .test-container-id: .container-cmd build.env scripts/Dockerfile.test [ ! -f .test-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):test $(CONTAINER_CMD) build $(CPUSET) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test . diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index 1fd481d62..f15b2ad56 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -56,7 +56,7 @@ spec: - "--drivername=cephfs.csi.ceph.com" # If topology based provisioning is desired, configure required # node labels representing the nodes topology domain - # and pass the label names below, for CSI to consume and advertize + # and pass the label names below, for CSI to consume and advertise # its equivalent topology domain # - "--domainlabels=failure-domain/region,failure-domain/zone" env: diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml index 6e6384411..d9d0c0d7c 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml @@ -57,7 +57,7 @@ spec: - "--drivername=rbd.csi.ceph.com" # If topology based provisioning is desired, configure required # node labels representing the nodes topology domain - # and pass the label names below, for CSI to consume and advertize + # and pass the label names below, for CSI to consume and advertise # its equivalent topology domain # - "--domainlabels=failure-domain/region,failure-domain/zone" env: diff --git a/docs/cephfs-snap-clone.md b/docs/cephfs-snap-clone.md index df77569ea..35a8fcc66 100644 --- a/docs/cephfs-snap-clone.md +++ b/docs/cephfs-snap-clone.md @@ -17,7 +17,7 @@ Refer [external-snapshotter](https://github.com/kubernetes-csi/external-snapshotter/) for more information on these sidecar controllers. There should be a `volumesnapshotclass` object present in the cluster - for snapshot request to be satisified. + for snapshot request to be satisfied. - To install snapshot controller and CRD diff --git a/docs/coding.md b/docs/coding.md index 8d6f21e84..5a4c4ba6b 100644 --- a/docs/coding.md +++ b/docs/coding.md @@ -77,7 +77,7 @@ import ( less noisy if the dollar signs are omitted. Especially when the command doesn't list the output, but if the command follows output we can use '$ ' (dollar+space) mainly to differentiate between - command and its ouput. + command and its output.
scenario 1: when command doesn't follow output diff --git a/docs/design/proposals/rbd-snap-clone.md b/docs/design/proposals/rbd-snap-clone.md index 0e63ffe42..7085276c0 100644 --- a/docs/design/proposals/rbd-snap-clone.md +++ b/docs/design/proposals/rbd-snap-clone.md @@ -23,7 +23,7 @@ snapshot, Restore RBD snapshot and Create new RBD image from existing RBD image. ## Create a snapshot from PVC Refer [snapshot](https://kubernetes.io/docs/concepts/storage/volume-snapshots/) -for more information realted to Volume cloning in kubernetes. +for more information related to Volume snapshots in kubernetes. ### steps to create a snapshot @@ -44,7 +44,7 @@ for more information realted to Volume cloning in kubernetes. rbd snap ls --all // If the parent has more snapshots than the configured `maxsnapshotsonimage` -// add backgound tasks to flatten the temporary cloned images (temporary cloned +// add background tasks to flatten the temporary cloned images (temporary cloned // image names will be same as snapshot names) ceph rbd task add flatten @@ -125,7 +125,7 @@ image(this will be applicable for both normal image and cloned image) Refer [volume-cloning](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/) -for more information realted to Volume cloning in kubernetes. +for more information related to Volume cloning in kubernetes. ### steps to create a Volume from Volume diff --git a/docs/development-guide.md b/docs/development-guide.md index 05e2a34f1..90e6e4111 100644 --- a/docs/development-guide.md +++ b/docs/development-guide.md @@ -21,7 +21,7 @@ it is **highly** encouraged to: to set it to `1` as we need to build with go-ceph bindings. * `GO111MODULE` is enabled by default, if `GO111MODULE` is set to `off` we need to set it to `on` as cephcsi uses go modules for dependency. -* Ceph-CSI uses the native Ceph libaries through the [go-ceph +* Ceph-CSI uses the native Ceph libraries through the [go-ceph package](https://github.com/ceph/go-ceph). It is required to install the Ceph C headers in order to compile Ceph-CSI. The packages are called `libcephfs-devel`, `librados-devel` and `librbd-devel` on many Linux @@ -97,7 +97,7 @@ make containerized-test TARGET=static-check ``` In addition to running tests locally, each Pull Request that is created will -trigger Continous Integration tests that include the `containerized-test`, but +trigger Continuous Integration tests that include the `containerized-test`, but also additional functionality tests that are defined under the `e2e/` directory.
diff --git a/docs/static-pvc.md b/docs/static-pvc.md index ade36ffc9..f47ad32be 100644 --- a/docs/static-pvc.md +++ b/docs/static-pvc.md @@ -10,7 +10,7 @@ - [Create CephFS subvolume](#create-cephfs-subvolume) - [Create CephFS static PV](#create-cephfs-static-pv) - [Node stage secret ref in CephFS PV](#node-stage-secret-ref-in-cephfs-pv) - - [CephFS Volume Attributes in PV](#cephfs-volume-attributes-in-pv) + - [CephFS volume attributes in PV](#cephfs-volume-attributes-in-pv) - [Create CephFS static PVC](#create-cephfs-static-pvc) This document outlines how to create static PV and static PVC from @@ -78,12 +78,12 @@ spec: Below table explains the list of volume attributes can be set when creating a static RBD PV -| Attributes | Description | Required | | :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: | +| Attributes | Description | Required | | :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created during ceph-csi deployment) | Yes | -| pool | The pool name in which rbd image is created | Yes | -| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | yes | -| mounter | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images | No | +| pool | The pool name in which rbd image is created | Yes | +| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes | +| mounter | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images | No | **Note** ceph-csi does not supports rbd image deletion for static PV.
`persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV @@ -122,7 +122,7 @@ $ kubectl create -f fs-static-pvc.yaml persistentvolumeclaim/fs-static-pvc created ``` -**Note** deleting PV and PVC doesnot deleted the backend rbd image, user need to +**Note** deleting PV and PVC does not remove the backend rbd image, users need to manually delete the rbd image if required ## CephFS static PVC @@ -201,12 +201,12 @@ Format for the secret should be same as detailed [here](https://github.com/ceph/ Below table explains the list of volume attributes can be set when creating a static CephFS PV -| Attributes | Description | Required | | :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: | +| Attributes | Description | Required | | :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created during ceph-csi deployment) | Yes | -| fsName | CephFS filesystem name into which the subvolume should be created/present | Yes | -| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes | -| rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes | +| fsName | CephFS filesystem name into which the subvolume should be created/present | Yes | +| staticVolume | Value must be set to `true` to mount and unmount static CephFS PVC | Yes | +| rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes | **Note** ceph-csi does not supports CephFS subvolume deletion for static PV.
`persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV diff --git a/e2e/rbd.go b/e2e/rbd.go index a037afd3b..4f866a545 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -271,7 +271,7 @@ var _ = Describe("RBD", func() { By("create a PVC and bind it to an app with normal user", func() { err := validateNormalUserPVCAccess(pvcPath, f) if err != nil { - e2elog.Failf("failed to validate normal user pvc and application bidning with error %v", err) + e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) diff --git a/e2e/staticpvc.go b/e2e/staticpvc.go index 89f2c1e8a..aade234cb 100644 --- a/e2e/staticpvc.go +++ b/e2e/staticpvc.go @@ -85,7 +85,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e pvcName = "pvc-name" namespace = f.UniqueName // minikube creates default class in cluster, we need to set dummy - // storageclass on PV and PVC to avoid storageclass name missmatch + // storageclass on PV and PVC to avoid storageclass name mismatch sc = "storage-class" ) @@ -176,7 +176,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro pvcName = "pvc-name" namespace = f.UniqueName // minikube creates default storage class in cluster, we need to set dummy - // storageclass on PV and PVC to avoid storageclass name missmatch + // storageclass on PV and PVC to avoid storageclass name mismatch sc = "storage-class" secretName = "cephfs-static-pv-sc" // #nosec ) diff --git a/e2e/upgrade-cephfs.go b/e2e/upgrade-cephfs.go index e524f1c47..8aa77cd87 100644 --- a/e2e/upgrade-cephfs.go +++ b/e2e/upgrade-cephfs.go @@ -354,7 +354,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { // wait for application pod to come up after resize err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timout waiting for pod to be in running state with error %v", err) + e2elog.Failf("timeout waiting for pod to be in running state with error %v", err) } // validate if resize is successful. 
err = checkDirSize(app, f, &opt, pvcExpandSize) diff --git a/internal/cephfs/clone.go b/internal/cephfs/clone.go index 99dbc5573..6d0b0abc5 100644 --- a/internal/cephfs/clone.go +++ b/internal/cephfs/clone.go @@ -69,7 +69,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO util.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err) } if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil { - // Incase the snap is already unprotected we get ErrSnapProtectionExist error code + // In case the snap is already unprotected we get ErrSnapProtectionExist error code // in that case we are safe and we could discard this error and we are good to go // ahead with deletion if !errors.Is(err, ErrSnapProtectionExist) { @@ -118,7 +118,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO } // As we completed clone, remove the intermediate snap if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil { - // Incase the snap is already unprotected we get ErrSnapProtectionExist error code + // In case the snap is already unprotected we get ErrSnapProtectionExist error code // in that case we are safe and we could discard this error and we are good to go // ahead with deletion if !errors.Is(err, ErrSnapProtectionExist) { diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index a59096c50..629154423 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -185,7 +185,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // while cloning the volume the size is not populated properly to the new volume now. // it will be fixed in cephfs soon with the parentvolume size. Till then by below // resize we are making sure we return or satisfy the requested size by setting the size - // explictly + // explicitly err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size) if err != nil { purgeErr := purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions, false) diff --git a/internal/cephfs/snapshot.go b/internal/cephfs/snapshot.go index b0d39d9b6..46cd71d05 100644 --- a/internal/cephfs/snapshot.go +++ b/internal/cephfs/snapshot.go @@ -207,7 +207,7 @@ func unprotectSnapshot(ctx context.Context, volOptions *volumeOptions, cr *util. "ceph", args[:]...) if err != nil { - // Incase the snap is already unprotected we get ErrSnapProtectionExist error code + // In case the snap is already unprotected we get ErrSnapProtectionExist error code // in that case we are safe and we could discard this error. 
if strings.Contains(err.Error(), snapProtectionExist) { return nil diff --git a/internal/cephfs/util.go b/internal/cephfs/util.go index c0958954f..56ef1a970 100644 --- a/internal/cephfs/util.go +++ b/internal/cephfs/util.go @@ -152,7 +152,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn func parseTime(ctx context.Context, createTime string) (*timestamp.Timestamp, error) { tm := &timestamp.Timestamp{} layout := "2006-01-02 15:04:05.000000" - // TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failng + // TODO currently parsing of timestamp to time.ANSIC generated from ceph fs is failing var t time.Time t, err := time.Parse(layout, createTime) if err != nil { diff --git a/internal/cephfs/volume.go b/internal/cephfs/volume.go index 8a50e8e5b..fd6ff8144 100644 --- a/internal/cephfs/volume.go +++ b/internal/cephfs/volume.go @@ -96,7 +96,7 @@ func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) ( if strings.HasPrefix(err.Error(), volumeNotFound) { return nil, ErrVolumeNotFound } - // Incase the error is other than invalid command return error to the caller. + // In case the error is other than invalid command return error to the caller. if !strings.Contains(err.Error(), invalidCommand) { return nil, ErrInvalidCommand } @@ -202,7 +202,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes clusterAdditionalInfo[vo.ClusterID].resizeSupported = true return nil } - // Incase the error is other than invalid command return error to the caller. + // In case the error is other than invalid command return error to the caller. if !strings.Contains(err.Error(), invalidCommand) { util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err) return err diff --git a/internal/csi-common/nodeserver-default.go b/internal/csi-common/nodeserver-default.go index 22e945f77..8f26626d3 100644 --- a/internal/csi-common/nodeserver-default.go +++ b/internal/csi-common/nodeserver-default.go @@ -66,7 +66,7 @@ func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetIn }, nil } -// NodeGetCapabilities returns RPC unknow capability. +// NodeGetCapabilities returns RPC unknown capability.
func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { util.TraceLog(ctx, "Using default NodeGetCapabilities") @@ -116,7 +116,7 @@ func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.No if err != nil { if os.IsNotExist(err) { - return nil, status.Errorf(codes.InvalidArgument, "targetpath %s doesnot exist", targetPath) + return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath) } return nil, err } diff --git a/internal/journal/voljournal.go b/internal/journal/voljournal.go index 45a459d75..5b44f3d3e 100644 --- a/internal/journal/voljournal.go +++ b/internal/journal/voljournal.go @@ -73,7 +73,7 @@ When a volume create request is received (or a snapshot create, the snapshot is it is used to read its references to reach the UUID that backs this VolName, to check if the UUID based volume can satisfy the requirements for the request - If during the process of checking the same, it is found that some linking information is stale - or missing, the corresponding keys upto the key in the csiDirectory is cleaned up, to start afresh + or missing, the corresponding keys up to the key in the csiDirectory are cleaned up, to start afresh - If the key with the CO VolName is not found, or was cleaned up, the request is treated as a new create request, and an CephUUIDDirectory is created first with a generated uuid, this ensures diff --git a/internal/rbd/clone.go b/internal/rbd/clone.go index 202fd2e40..008d94a40 100644 --- a/internal/rbd/clone.go +++ b/internal/rbd/clone.go @@ -110,7 +110,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume) } return true, nil } - // as the temp clone doesnot exists,check snapshot exists on parent volume + // as the temp clone does not exist, check snapshot exists on parent volume // snapshot name is same as temporary clone image snap.RbdImageName = tempClone.RbdImageName err = parentVol.checkSnapExists(snap) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 310b86160..b69dde223 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -545,7 +545,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u if !errors.Is(err, ErrSnapNotFound) { return nil, nil, status.Error(codes.Internal, err.Error()) } - return nil, nil, status.Errorf(codes.NotFound, "%s snapshot doesnot exists", snapshotID) + return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID) } return nil, rbdSnap, nil case *csi.VolumeContentSource_Volume: @@ -564,7 +564,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u if !errors.Is(err, ErrImageNotFound) { return nil, nil, status.Error(codes.Internal, err.Error()) } - return nil, nil, status.Errorf(codes.NotFound, "%s image doesnot exists", volID) + return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID) } return rbdvol, nil, nil } diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 71e9f885a..f003de090 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -492,7 +492,7 @@ func (rv *rbdVolume) flattenRbdImage(ctx context.Context, cr *util.Credentials, supported, err := addRbdManagerTask(ctx, rv, args) if supported { if err != nil { - // discard flattening error if the image doesnot have any parent + // discard flattening error if the image does not have any
parent rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName) if strings.Contains(err.Error(), rbdFlattenNoParent) { return nil diff --git a/internal/util/errors.go b/internal/util/errors.go index 444d6fb3b..a40d63b93 100644 --- a/internal/util/errors.go +++ b/internal/util/errors.go @@ -55,7 +55,7 @@ func (e errorPair) Unwrap() error { } // JoinErrors combines two errors. Of the returned error, Is() follows the first -// branch, Unwrap() folllows the second branch. +// branch, Unwrap() follows the second branch. func JoinErrors(e1, e2 error) error { return errorPair{e1, e2} } diff --git a/internal/util/util.go b/internal/util/util.go index 0c6c79046..a907fd745 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -34,7 +34,7 @@ import ( "k8s.io/utils/mount" ) -// RoundOffVolSize rounds up given quantity upto chunks of MiB/GiB. +// RoundOffVolSize rounds up the given quantity to chunks of MiB/GiB. func RoundOffVolSize(size int64) int64 { size = RoundOffBytes(size) // convert size back to MiB for rbd CLI @@ -147,7 +147,7 @@ func GetKernelVersion() (string, error) { return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil } -// KernelVersion holds kernel related informations. +// KernelVersion holds kernel related information. type KernelVersion struct { Version int PatchLevel int diff --git a/internal/util/validate.go b/internal/util/validate.go index bc75bd685..1083f831e 100644 --- a/internal/util/validate.go +++ b/internal/util/validate.go @@ -27,7 +27,7 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error { // validate stagingpath exists ok := checkDirExists(req.GetStagingTargetPath()) if !ok { - return status.Errorf(codes.InvalidArgument, "staging path %s does not exists on node", req.GetStagingTargetPath()) + return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath()) } return nil } diff --git a/internal/util/volid.go b/internal/util/volid.go index 281a79336..0f452d702 100644 --- a/internal/util/volid.go +++ b/internal/util/volid.go @@ -130,7 +130,7 @@ func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) { nextFieldStartIdx := (10 + clusterIDLength + 1) // minLenToDecode is now 17 as composedCSIID should include - // atleast 16 for poolID encoding and 1 for '-' separator. + // at least 16 for poolID encoding and 1 for '-' separator. const minLenToDecode = 17 if bytesToProcess < minLenToDecode { return errors.New("failed to decode CSI identifier, string underflow") diff --git a/scripts/k8s-storage/driver-cephfs.yaml b/scripts/k8s-storage/driver-cephfs.yaml index 44a9addfa..b91cc8974 100644 --- a/scripts/k8s-storage/driver-cephfs.yaml +++ b/scripts/k8s-storage/driver-cephfs.yaml @@ -25,12 +25,12 @@ DriverInfo: RequiredMountOption: rw: {} - # Optional list of access modes required for provisiong. Default is RWO + # Optional list of access modes required for provisioning.
Default is RWO # RequiredAcccessModes: # Map that represents the capabilities the driver supports Capabilities: - # Data is persistest accross pod restarts + # Data is persisted across pod restarts persistence: true # Volume ownership via fsGroup diff --git a/scripts/k8s-storage/driver-rbd-rwo.yaml b/scripts/k8s-storage/driver-rbd-rwo.yaml index afb3ae6ff..f2598f8b9 100644 --- a/scripts/k8s-storage/driver-rbd-rwo.yaml +++ b/scripts/k8s-storage/driver-rbd-rwo.yaml @@ -30,12 +30,12 @@ DriverInfo: RequiredMountOption: rw: {} - # Optional list of access modes required for provisiong. Default is RWO + # Optional list of access modes required for provisioning. Default is RWO # RequiredAcccessModes: # Map that represents the capabilities the driver supports Capabilities: - # Data is persistest accross pod restarts + # Data is persisted across pod restarts persistence: true # Volume ownership via fsGroup diff --git a/scripts/rook.sh b/scripts/rook.sh index cd47a98c7..c7b65ea9e 100755 --- a/scripts/rook.sh +++ b/scripts/rook.sh @@ -48,7 +48,7 @@ kubectl_retry() { ret=$(grep -cvw 'AlreadyExists' "${stderr}") if [ "${ret}" -eq 0 ] then - # Succes! stderr is empty after removing all "AlreadyExists" lines. + # Success! stderr is empty after removing all "AlreadyExists" lines. break fi fi