cleanup: fix misspelled words

Fixed misspelled words in the repo.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna authored 2020-11-24 17:24:29 +05:30, committed by Madhu Rajanna
parent eee49a6e0a
commit 39b1f2b4d3
28 changed files with 51 additions and 51 deletions

View File

@@ -179,7 +179,7 @@ containerized-test: .container-cmd .test-container-id
$(CONTAINER_CMD) run --rm -v $(CURDIR):/go/src/github.com/ceph/ceph-csi$(SELINUX_VOL_FLAG) $(CSI_IMAGE_NAME):test make $(TARGET) GIT_SINCE=$(GIT_SINCE) REBASE=$(REBASE) CONTAINERIZED=yes
ifeq ($(USE_PULLED_IMAGE),no)
# create a (cached) container image with dependencied for building cephcsi
# create a (cached) container image with dependencies for building cephcsi
.devel-container-id: .container-cmd scripts/Dockerfile.devel
[ ! -f .devel-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):devel
$(CONTAINER_CMD) build $(CPUSET) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t $(CSI_IMAGE_NAME):devel -f ./scripts/Dockerfile.devel .
@@ -191,7 +191,7 @@ else
endif
ifeq ($(USE_PULLED_IMAGE),no)
# create a (cached) container image with dependencied for testing cephcsi
# create a (cached) container image with dependencies for testing cephcsi
.test-container-id: .container-cmd build.env scripts/Dockerfile.test
[ ! -f .test-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):test
$(CONTAINER_CMD) build $(CPUSET) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test .

View File

@@ -56,7 +56,7 @@ spec:
- "--drivername=cephfs.csi.ceph.com"
# If topology based provisioning is desired, configure required
# node labels representing the nodes topology domain
# and pass the label names below, for CSI to consume and advertize
# and pass the label names below, for CSI to consume and advertise
# its equivalent topology domain
# - "--domainlabels=failure-domain/region,failure-domain/zone"
env:

View File

@@ -57,7 +57,7 @@ spec:
- "--drivername=rbd.csi.ceph.com"
# If topology based provisioning is desired, configure required
# node labels representing the nodes topology domain
# and pass the label names below, for CSI to consume and advertize
# and pass the label names below, for CSI to consume and advertise
# its equivalent topology domain
# - "--domainlabels=failure-domain/region,failure-domain/zone"
env:

View File

@@ -17,7 +17,7 @@
Refer [external-snapshotter](https://github.com/kubernetes-csi/external-snapshotter/)
for more information on these sidecar controllers. There should
be a `volumesnapshotclass` object present in the cluster
for snapshot request to be satisified.
for snapshot request to be satisfied.
- To install snapshot controller and CRD

View File

@@ -77,7 +77,7 @@ import (
less noisy if the dollar signs are omitted. Especially when the
command doesn't list the output, but if the command follows output
we can use '$ ' (dollar+space) mainly to differentiate between
command and its ouput.
command and its output.
scenario 1: when command doesn't follow output

View File

@@ -23,7 +23,7 @@ snapshot, Restore RBD snapshot and Create new RBD image from existing RBD image.
## Create a snapshot from PVC
Refer [snapshot](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
for more information realted to Volume cloning in kubernetes.
for more information related to Volume cloning in kubernetes.
### steps to create a snapshot
@@ -44,7 +44,7 @@ for more information realted to Volume cloning in kubernetes.
rbd snap ls <RBD image for src k8s volume> --all
// If the parent has more snapshots than the configured `maxsnapshotsonimage`
// add backgound tasks to flatten the temporary cloned images (temporary cloned
// add background tasks to flatten the temporary cloned images (temporary cloned
// image names will be same as snapshot names)
ceph rbd task add flatten <RBD image for temporary snap images>
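
The flatten is queued as a ceph manager task so the expensive copy happens in the background rather than on the provisioning path. A minimal Go sketch of driving that CLI step from code, with a hypothetical pool/image name:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Queue a background flatten for a temporary cloned image, mirroring
	// the `ceph rbd task add flatten` step above; the pool/image name is
	// hypothetical.
	out, err := exec.Command("ceph", "rbd", "task", "add", "flatten",
		"replicapool/csi-vol-temp").CombinedOutput()
	if err != nil {
		fmt.Printf("flatten task failed: %v: %s\n", err, out)
		return
	}
	fmt.Printf("queued: %s\n", out)
}
```
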
@@ -125,7 +125,7 @@ image(this will be applicable for both normal image and cloned image)
Refer
[volume-cloning](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/)
for more information realted to Volume cloning in kubernetes.
for more information related to Volume cloning in kubernetes.
### steps to create a Volume from Volume

View File

@@ -21,7 +21,7 @@ it is **highly** encouraged to:
to set it to `1` as we need to build with go-ceph bindings.
* `GO111MODULE` is enabled by default, if `GO111MODULE` is set to `off` we need
to set it to `on` as cephcsi uses go modules for dependency.
* Ceph-CSI uses the native Ceph libaries through the [go-ceph
* Ceph-CSI uses the native Ceph libraries through the [go-ceph
package](https://github.com/ceph/go-ceph). It is required to install the
Ceph C headers in order to compile Ceph-CSI. The packages are called
`libcephfs-devel`, `librados-devel` and `librbd-devel` on many Linux
@@ -97,7 +97,7 @@ make containerized-test TARGET=static-check
```
In addition to running tests locally, each Pull Request that is created will
trigger Continous Integration tests that include the `containerized-test`, but
trigger Continuous Integration tests that include the `containerized-test`, but
also additional functionality tests that are defined under the `e2e/`
directory.

View File

@@ -10,7 +10,7 @@
- [Create CephFS subvolume](#create-cephfs-subvolume)
- [Create CephFS static PV](#create-cephfs-static-pv)
- [Node stage secret ref in CephFS PV](#node-stage-secret-ref-in-cephfs-pv)
- [CephFS Volume Attributes in PV](#cephfs-volume-attributes-in-pv)
- [CephFS volume attributes in PV](#cephfs-volume-attributes-in-pv)
- [Create CephFS static PVC](#create-cephfs-static-pvc)
This document outlines how to create static PV and static PVC from
@@ -78,12 +78,12 @@ spec:
Below table explains the list of volume attributes that can be set when creating a
static RBD PV
| Attributes | Description | Required |
| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
| Attributes | Description | Required |
| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
| clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created during ceph-csi deployment) | Yes |
| pool | The pool name in which rbd image is created | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | yes |
| mounter | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images | No |
| pool | The pool name in which rbd image is created | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | yes |
| mounter | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images | No |
**Note** ceph-csi does not support rbd image deletion for static PV.
`persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV
@@ -122,7 +122,7 @@ $ kubectl create -f fs-static-pvc.yaml
persistentvolumeclaim/fs-static-pvc created
```
**Note** deleting PV and PVC doesnot deleted the backend rbd image, user need to
**Note** deleting PV and PVC does not remove the backend rbd image; the user needs to
manually delete the rbd image if required
## CephFS static PVC
@@ -201,12 +201,12 @@ Format for the secret should be same as detailed [here](https://github.com/ceph/
Below table explains the list of volume attributes that can be set when creating a
static CephFS PV
| Attributes | Description | Required |
| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
| Attributes | Description | Required |
| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
| clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created during ceph-csi deployment) | Yes |
| fsName | CephFS filesystem name into which the subvolume should be created/present | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes |
| rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes |
| fsName | CephFS filesystem name into which the subvolume should be created/present | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes |
| rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes |
**Note** ceph-csi does not support CephFS subvolume deletion for static PV.
`persistentVolumeReclaimPolicy` in PV spec must be set to `Retain` to avoid PV

View File

@@ -271,7 +271,7 @@ var _ = Describe("RBD", func() {
By("create a PVC and bind it to an app with normal user", func() {
err := validateNormalUserPVCAccess(pvcPath, f)
if err != nil {
e2elog.Failf("failed to validate normal user pvc and application bidning with error %v", err)
e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0)

View File

@@ -85,7 +85,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
pvcName = "pvc-name"
namespace = f.UniqueName
// minikube creates default class in cluster, we need to set dummy
// storageclass on PV and PVC to avoid storageclass name missmatch
// storageclass on PV and PVC to avoid storageclass name mismatch
sc = "storage-class"
)
@@ -176,7 +176,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
pvcName = "pvc-name"
namespace = f.UniqueName
// minikube creates default storage class in cluster, we need to set dummy
// storageclass on PV and PVC to avoid storageclass name missmatch
// storageclass on PV and PVC to avoid storageclass name mismatch
sc = "storage-class"
secretName = "cephfs-static-pv-sc" // #nosec
)

View File

@@ -354,7 +354,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timout waiting for pod to be in running state with error %v", err)
e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
}
// validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize)

View File

@@ -69,7 +69,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
util.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
}
if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil {
// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
// In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error and we are good to go
// ahead with deletion
if !errors.Is(err, ErrSnapProtectionExist) {
@@ -118,7 +118,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
}
// As we completed clone, remove the intermediate snap
if err = unprotectSnapshot(ctx, parentvolOpt, cr, snapshotID, volID); err != nil {
// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
// In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error and we are good to go
// ahead with deletion
if !errors.Is(err, ErrSnapProtectionExist) {

View File

@@ -185,7 +185,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// while cloning the volume the size is not populated properly to the new volume now.
// it will be fixed in cephfs soon with the parentvolume size. Till then by below
// resize we are making sure we return or satisfy the requested size by setting the size
// explictly
// explicitly
err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
if err != nil {
purgeErr := purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions, false)
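
The hunk works around clones not inheriting the requested size by resizing explicitly, and it purges the half-created clone when that resize fails. A minimal sketch of the compensate-on-failure flow, with hypothetical helper names standing in for the real cephfs functions:

```go
package main

import "fmt"

// Hypothetical stand-ins for the real cephfs helpers.
func cloneVolume(name string) error              { fmt.Println("clone", name); return nil }
func resizeVolume(name string, size int64) error { fmt.Println("resize", name, size); return nil }
func purgeVolume(name string) error              { fmt.Println("purge", name); return nil }

// cloneWithSize clones a volume and then sets the size explicitly, removing
// the half-created clone when the resize fails so no orphan is left behind.
func cloneWithSize(name string, size int64) error {
	if err := cloneVolume(name); err != nil {
		return err
	}
	if err := resizeVolume(name, size); err != nil {
		if purgeErr := purgeVolume(name); purgeErr != nil {
			return fmt.Errorf("resize failed (%v) and purge failed: %w", err, purgeErr)
		}
		return err
	}
	return nil
}

func main() { _ = cloneWithSize("csi-vol-1", 1<<30) }
```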

View File

@@ -207,7 +207,7 @@ func unprotectSnapshot(ctx context.Context, volOptions *volumeOptions, cr *util.
"ceph",
args[:]...)
if err != nil {
// Incase the snap is already unprotected we get ErrSnapProtectionExist error code
// In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error.
if strings.Contains(err.Error(), snapProtectionExist) {
return nil
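
Swallowing the already-unprotected error makes repeated unprotect calls idempotent, which matters when a delete path is retried. A sketch of the pattern; the value of `snapProtectionExist` is an assumption here, the real sentinel comes from the ceph CLI error text:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Assumed sentinel text for the CLI's "already unprotected" complaint.
const snapProtectionExist = "snap protection already disabled"

// ignoreAlreadyUnprotected makes an unprotect step idempotent: a second
// run reports success instead of surfacing the CLI complaint.
func ignoreAlreadyUnprotected(err error) error {
	if err != nil && strings.Contains(err.Error(), snapProtectionExist) {
		return nil
	}
	return err
}

func main() {
	err := errors.New("rbd: snap protection already disabled for snap-1")
	fmt.Println(ignoreAlreadyUnprotected(err)) // <nil>
}
```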

View File

@@ -152,7 +152,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
func parseTime(ctx context.Context, createTime string) (*timestamp.Timestamp, error) {
tm := &timestamp.Timestamp{}
layout := "2006-01-02 15:04:05.000000"
// TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failng
// TODO currently parsing of timestamp to time.ANSIC generated from ceph fs is failing
var t time.Time
t, err := time.Parse(layout, createTime)
if err != nil {
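
The layout string follows Go's reference time (Mon Jan 2 15:04:05 2006), with `.000000` matching the microsecond precision cephfs emits. A self-contained check that the format parses as intended:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Go layouts use the reference time Mon Jan 2 15:04:05 2006; the
	// trailing .000000 matches cephfs's microsecond timestamps.
	const layout = "2006-01-02 15:04:05.000000"
	t, err := time.Parse(layout, "2020-11-24 17:24:29.123456")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2020-11-24 17:24:29.123456 +0000 UTC
}
```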

View File

@@ -96,7 +96,7 @@ func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (
if strings.HasPrefix(err.Error(), volumeNotFound) {
return nil, ErrVolumeNotFound
}
// Incase the error is other than invalid command return error to the caller.
// In case the error is other than invalid command return error to the caller.
if !strings.Contains(err.Error(), invalidCommand) {
return nil, ErrInvalidCommand
}
@@ -202,7 +202,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
clusterAdditionalInfo[vo.ClusterID].resizeSupported = true
return nil
}
// Incase the error is other than invalid command return error to the caller.
// In case the error is other than invalid command return error to the caller.
if !strings.Contains(err.Error(), invalidCommand) {
util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
return err
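
The surrounding code probes once whether the cluster knows the native resize command and caches the answer per cluster (`clusterAdditionalInfo[vo.ClusterID].resizeSupported`), so only the first request pays for the failed probe. A hedged sketch of that probe-and-cache idea with simplified, hypothetical names:

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// Assumed sentinel for "this ceph version does not know the command".
const invalidCommand = "invalid command"

var (
	mu        sync.Mutex
	supported = map[string]bool{} // resize support, keyed by cluster ID
)

// resize tries the native subvolume resize first and falls back once the
// cluster reports the command as unknown, caching the probe result so the
// failing call is not repeated for every request.
func resize(clusterID string, native, fallback func() error) error {
	mu.Lock()
	ok, probed := supported[clusterID]
	mu.Unlock()
	if !probed || ok {
		err := native()
		if err == nil {
			mu.Lock()
			supported[clusterID] = true
			mu.Unlock()
			return nil
		}
		if !strings.Contains(err.Error(), invalidCommand) {
			return err // a real failure, not a missing command
		}
		mu.Lock()
		supported[clusterID] = false
		mu.Unlock()
	}
	return fallback()
}

func main() {
	native := func() error { return fmt.Errorf("rados: invalid command") }
	fallback := func() error { fmt.Println("fallback resize"); return nil }
	_ = resize("cluster-1", native, fallback)
	_ = resize("cluster-1", native, fallback) // native is skipped this time
}
```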

View File

@@ -66,7 +66,7 @@ func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetIn
}, nil
}
// NodeGetCapabilities returns RPC unknow capability.
// NodeGetCapabilities returns RPC unknown capability.
func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
util.TraceLog(ctx, "Using default NodeGetCapabilities")
@@ -116,7 +116,7 @@ func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.No
if err != nil {
if os.IsNotExist(err) {
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s doesnot exist", targetPath)
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
}
return nil, err
}
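
Translating `os.IsNotExist` into a gRPC `InvalidArgument` status gives CO callers a well-defined error code instead of a raw OS error. A minimal sketch of the stat-and-translate step:

```go
package main

import (
	"fmt"
	"os"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// statTargetPath turns a missing target path into a gRPC InvalidArgument
// status and leaves other stat failures untouched.
func statTargetPath(targetPath string) error {
	if _, err := os.Stat(targetPath); err != nil {
		if os.IsNotExist(err) {
			return status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(statTargetPath("/no/such/target")) // rpc error: code = InvalidArgument ...
}
```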

View File

@@ -73,7 +73,7 @@ When a volume create request is received (or a snapshot create, the snapshot is
it is used to read its references to reach the UUID that backs this VolName, to check if the
UUID based volume can satisfy the requirements for the request
- If during the process of checking the same, it is found that some linking information is stale
or missing, the corresponding keys upto the key in the csiDirectory is cleaned up, to start afresh
or missing, the corresponding keys up to the key in the csiDirectory are cleaned up, to start afresh
- If the key with the CO VolName is not found, or was cleaned up, the request is treated as a
new create request, and an CephUUIDDirectory is created first with a generated uuid, this ensures
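
A hedged sketch of the lookup step this design describes, with a hypothetical key/value interface and key layout standing in for the real RADOS omaps:

```go
package journal

import "errors"

// ErrKeyNotFound is returned by the store when a key is absent.
var ErrKeyNotFound = errors.New("key not found")

// store is a hypothetical key/value view of the RADOS omaps described above.
type store interface {
	Get(key string) (string, error)
	Delete(key string) error
}

// lookupUUID follows the CO VolName key to the backing UUID. A dangling
// link (VolName key present, UUID directory gone) is deleted so the caller
// can treat the request as a fresh create, as the design text describes.
func lookupUUID(s store, volName string) (string, error) {
	uuid, err := s.Get("csi.volume." + volName) // hypothetical key layout
	if err != nil {
		return "", err // includes ErrKeyNotFound: treat as a new request
	}
	if _, err := s.Get("csi.volume.uuid." + uuid); err != nil {
		if errors.Is(err, ErrKeyNotFound) {
			// Stale link: remove it so the caller starts afresh.
			if derr := s.Delete("csi.volume." + volName); derr != nil {
				return "", derr
			}
		}
		return "", err
	}
	return uuid, nil
}
```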

View File

@@ -110,7 +110,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
}
return true, nil
}
// as the temp clone doesnot exists,check snapshot exists on parent volume
// as the temp clone does not exist, check snapshot exists on parent volume
// snapshot name is same as temporary clone image
snap.RbdImageName = tempClone.RbdImageName
err = parentVol.checkSnapExists(snap)

View File

@@ -545,7 +545,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u
if !errors.Is(err, ErrSnapNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error())
}
return nil, nil, status.Errorf(codes.NotFound, "%s snapshot doesnot exists", snapshotID)
return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
}
return nil, rbdSnap, nil
case *csi.VolumeContentSource_Volume:
@@ -564,7 +564,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u
if !errors.Is(err, ErrImageNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error())
}
return nil, nil, status.Errorf(codes.NotFound, "%s image doesnot exists", volID)
return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
}
return rbdvol, nil, nil
}
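
The pattern here maps an internal not-found sentinel to `codes.NotFound` while surfacing everything else as `codes.Internal`. A minimal sketch; `ErrSnapNotFound` mirrors the sentinel checked above:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ErrSnapNotFound mirrors the sentinel checked in the hunk above.
var ErrSnapNotFound = errors.New("snapshot not found")

// translateSnapErr maps the not-found sentinel to a gRPC NotFound status;
// every other failure is surfaced as Internal.
func translateSnapErr(snapshotID string, err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, ErrSnapNotFound) {
		return status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
	}
	return status.Error(codes.Internal, err.Error())
}

func main() {
	// errors.Is also matches the sentinel through wrapped errors.
	fmt.Println(translateSnapErr("snap-1", fmt.Errorf("lookup: %w", ErrSnapNotFound)))
}
```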

View File

@@ -492,7 +492,7 @@ func (rv *rbdVolume) flattenRbdImage(ctx context.Context, cr *util.Credentials,
supported, err := addRbdManagerTask(ctx, rv, args)
if supported {
if err != nil {
// discard flattening error if the image doesnot have any parent
// discard flattening error if the image does not have any parent
rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
if strings.Contains(err.Error(), rbdFlattenNoParent) {
return nil

View File

@@ -55,7 +55,7 @@ func (e errorPair) Unwrap() error {
}
// JoinErrors combines two errors. Of the returned error, Is() follows the first
// branch, Unwrap() folllows the second branch.
// branch, Unwrap() follows the second branch.
func JoinErrors(e1, e2 error) error {
return errorPair{e1, e2}
}
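
The comment pins down the contract: `errors.Is` matches either branch (the first via `Is()`, the second via the `Unwrap()` chain), while `errors.Unwrap` only follows the second. A reconstruction consistent with the signatures visible in the hunk; the real ceph-csi implementation may differ in detail:

```go
package main

import (
	"errors"
	"fmt"
)

type errorPair struct {
	first  error
	second error
}

func (e errorPair) Error() string {
	return fmt.Sprintf("%v: %v", e.first, e.second)
}

// Is follows the first branch.
func (e errorPair) Is(target error) bool {
	return errors.Is(e.first, target)
}

// Unwrap follows the second branch.
func (e errorPair) Unwrap() error {
	return e.second
}

// JoinErrors combines two errors into one value that matches both.
func JoinErrors(e1, e2 error) error {
	return errorPair{e1, e2}
}

func main() {
	e1 := errors.New("request failed")
	e2 := errors.New("image busy")
	joined := JoinErrors(e1, e2)
	fmt.Println(errors.Is(joined, e1), errors.Is(joined, e2)) // true true
	fmt.Println(errors.Unwrap(joined) == e2)                  // true
}
```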

View File

@@ -34,7 +34,7 @@ import (
"k8s.io/utils/mount"
)
// RoundOffVolSize rounds up given quantity upto chunks of MiB/GiB.
// RoundOffVolSize rounds up given quantity up to chunks of MiB/GiB.
func RoundOffVolSize(size int64) int64 {
size = RoundOffBytes(size)
// convert size back to MiB for rbd CLI
@@ -147,7 +147,7 @@ func GetKernelVersion() (string, error) {
return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil
}
// KernelVersion holds kernel related informations.
// KernelVersion holds kernel related information.
type KernelVersion struct {
Version int
PatchLevel int
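
A hedged sketch of chunked rounding, assuming the usual rule of whole GiB at or above 1 GiB and whole MiB below it; the real `RoundOffBytes` may handle edge cases differently:

```go
package main

import "fmt"

const (
	mib int64 = 1024 * 1024
	gib       = mib * 1024
)

// roundUp rounds size up to the next multiple of chunk.
func roundUp(size, chunk int64) int64 {
	return ((size + chunk - 1) / chunk) * chunk
}

// roundOffBytes sketches the assumed rule: whole GiB at or above 1 GiB,
// whole MiB below it.
func roundOffBytes(size int64) int64 {
	if size >= gib {
		return roundUp(size, gib)
	}
	return roundUp(size, mib)
}

func main() {
	fmt.Println(roundOffBytes(1500*mib) / gib) // 2
	fmt.Println(roundOffBytes(1) / mib)        // 1
}
```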

View File

@@ -27,7 +27,7 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
// validate stagingpath exists
ok := checkDirExists(req.GetStagingTargetPath())
if !ok {
return status.Errorf(codes.InvalidArgument, "staging path %s does not exists on node", req.GetStagingTargetPath())
return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath())
}
return nil
}
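
`checkDirExists` itself is out of view in this hunk; a plausible stat-based body for it:

```go
package main

import (
	"fmt"
	"os"
)

// checkDirExists reports whether the path exists and is a directory — a
// plausible body for the helper used in the validation above.
func checkDirExists(p string) bool {
	fi, err := os.Stat(p)
	return err == nil && fi.IsDir()
}

func main() {
	fmt.Println(checkDirExists(os.TempDir()))            // true
	fmt.Println(checkDirExists("/no/such/staging/path")) // false
}
```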

View File

@@ -130,7 +130,7 @@ func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) {
nextFieldStartIdx := (10 + clusterIDLength + 1)
// minLenToDecode is now 17 as composedCSIID should include
// atleast 16 for poolID encoding and 1 for '-' separator.
// at least 16 for poolID encoding and 1 for '-' separator.
const minLenToDecode = 17
if bytesToProcess < minLenToDecode {
return errors.New("failed to decode CSI identifier, string underflow")
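
The 16 hex characters after the separator encode a 64-bit pool ID, which is where the 16+1 minimum comes from. A stdlib sketch of decoding one such field:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 16 hex characters encode the 64-bit pool ID, hence the 16+1 ('-')
	// minimum checked above. Decode one such field:
	const field = "000000000000000f" // hypothetical pool ID 15
	poolID, err := strconv.ParseUint(field, 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(poolID)) // 15
}
```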

View File

@@ -25,12 +25,12 @@ DriverInfo:
RequiredMountOption:
rw: {}
# Optional list of access modes required for provisiong. Default is RWO
# Optional list of access modes required for provisioning. Default is RWO
# RequiredAcccessModes:
# Map that represents the capabilities the driver supports
Capabilities:
# Data is persistest accross pod restarts
# Data is persisted across pod restarts
persistence: true
# Volume ownership via fsGroup

View File

@@ -30,12 +30,12 @@ DriverInfo:
RequiredMountOption:
rw: {}
# Optional list of access modes required for provisiong. Default is RWO
# Optional list of access modes required for provisioning. Default is RWO
# RequiredAcccessModes:
# Map that represents the capabilities the driver supports
Capabilities:
# Data is persistest accross pod restarts
# Data is persisted across pod restarts
persistence: true
# Volume ownership via fsGroup

View File

@@ -48,7 +48,7 @@ kubectl_retry() {
ret=$(grep -cvw 'AlreadyExists' "${stderr}")
if [ "${ret}" -eq 0 ]
then
# Succes! stderr is empty after removing all "AlreadyExists" lines.
# Success! stderr is empty after removing all "AlreadyExists" lines.
break
fi
fi