cleanup: fix misspelled words

Fixed misspelled words in the repo.
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2020-11-24 17:24:29 +05:30; committed by Madhu Rajanna
parent eee49a6e0a
commit 39b1f2b4d3
28 changed files with 51 additions and 51 deletions


@@ -179,7 +179,7 @@ containerized-test: .container-cmd .test-container-id
 $(CONTAINER_CMD) run --rm -v $(CURDIR):/go/src/github.com/ceph/ceph-csi$(SELINUX_VOL_FLAG) $(CSI_IMAGE_NAME):test make $(TARGET) GIT_SINCE=$(GIT_SINCE) REBASE=$(REBASE) CONTAINERIZED=yes
 ifeq ($(USE_PULLED_IMAGE),no)
-# create a (cached) container image with dependencied for building cephcsi
+# create a (cached) container image with dependencies for building cephcsi
 .devel-container-id: .container-cmd scripts/Dockerfile.devel
 [ ! -f .devel-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):devel
 $(CONTAINER_CMD) build $(CPUSET) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t $(CSI_IMAGE_NAME):devel -f ./scripts/Dockerfile.devel .
@@ -191,7 +191,7 @@ else
 endif
 ifeq ($(USE_PULLED_IMAGE),no)
-# create a (cached) container image with dependencied for testing cephcsi
+# create a (cached) container image with dependencies for testing cephcsi
 .test-container-id: .container-cmd build.env scripts/Dockerfile.test
 [ ! -f .test-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):test
 $(CONTAINER_CMD) build $(CPUSET) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test .


@@ -56,7 +56,7 @@ spec:
 - "--drivername=cephfs.csi.ceph.com"
 # If topology based provisioning is desired, configure required
 # node labels representing the nodes topology domain
-# and pass the label names below, for CSI to consume and advertize
+# and pass the label names below, for CSI to consume and advertise
 # its equivalent topology domain
 # - "--domainlabels=failure-domain/region,failure-domain/zone"
 env:


@@ -57,7 +57,7 @@ spec:
 - "--drivername=rbd.csi.ceph.com"
 # If topology based provisioning is desired, configure required
 # node labels representing the nodes topology domain
-# and pass the label names below, for CSI to consume and advertize
+# and pass the label names below, for CSI to consume and advertise
 # its equivalent topology domain
 # - "--domainlabels=failure-domain/region,failure-domain/zone"
 env:


@@ -17,7 +17,7 @@
 Refer [external-snapshotter](https://github.com/kubernetes-csi/external-snapshotter/)
 for more information on these sidecar controllers. There should
 be a `volumesnapshotclass` object present in the cluster
-for snapshot request to be satisified.
+for snapshot request to be satisfied.
 - To install snapshot controller and CRD


@@ -77,7 +77,7 @@ import (
 less noisy if the dollar signs are omitted. Especially when the
 command doesn't list the output, but if the command follows output
 we can use '$ ' (dollar+space) mainly to differentiate between
-command and its ouput.
+command and its output.
 scenario 1: when command doesn't follow output


@@ -23,7 +23,7 @@ snapshot, Restore RBD snapshot and Create new RBD image from existing RBD image.
 ## Create a snapshot from PVC
 Refer [snapshot](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
-for more information realted to Volume cloning in kubernetes.
+for more information related to Volume cloning in kubernetes.
 ### steps to create a snapshot
@@ -44,7 +44,7 @@ for more information realted to Volume cloning in kubernetes.
 rbd snap ls <RBD image for src k8s volume> --all
 // If the parent has more snapshots than the configured `maxsnapshotsonimage`
-// add backgound tasks to flatten the temporary cloned images (temporary cloned
+// add background tasks to flatten the temporary cloned images (temporary cloned
 // image names will be same as snapshot names)
 ceph rbd task add flatten <RBD image for temporary snap images>
@@ -125,7 +125,7 @@ image(this will be applicable for both normal image and cloned image)
 Refer
 [volume-cloning](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/)
-for more information realted to Volume cloning in kubernetes.
+for more information related to Volume cloning in kubernetes.
 ### steps to create a Volume from Volume


@@ -21,7 +21,7 @@ it is **highly** encouraged to:
 to set it to `1` as we need to build with go-ceph bindings.
 * `GO111MODULE` is enabled by default, if `GO111MODULE` is set to `off` we need
 to set it to `on` as cephcsi uses go modules for dependency.
-* Ceph-CSI uses the native Ceph libaries through the [go-ceph
+* Ceph-CSI uses the native Ceph libraries through the [go-ceph
 package](https://github.com/ceph/go-ceph). It is required to install the
 Ceph C headers in order to compile Ceph-CSI. The packages are called
 `libcephfs-devel`, `librados-devel` and `librbd-devel` on many Linux
@@ -97,7 +97,7 @@ make containerized-test TARGET=static-check
 ```
 In addition to running tests locally, each Pull Request that is created will
-trigger Continous Integration tests that include the `containerized-test`, but
+trigger Continuous Integration tests that include the `containerized-test`, but
 also additional functionality tests that are defined under the `e2e/`
 directory.


@@ -10,7 +10,7 @@
 - [Create CephFS subvolume](#create-cephfs-subvolume)
 - [Create CephFS static PV](#create-cephfs-static-pv)
 - [Node stage secret ref in CephFS PV](#node-stage-secret-ref-in-cephfs-pv)
-- [CephFS Volume Attributes in PV](#cephfs-volume-attributes-in-pv)
+- [CephFS volume attributes in PV](#cephfs-volume-attributes-in-pv)
 - [Create CephFS static PVC](#create-cephfs-static-pvc)
 This document outlines how to create static PV and static PVC from
@@ -79,7 +79,7 @@ Below table explains the list of volume attributes can be set when creating a
 static RBD PV
 | Attributes | Description | Required |
-| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
+| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
 | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created duing ceph-csi deployment) | Yes |
 | pool | The pool name in which rbd image is created | Yes |
 | staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | yes |
@@ -122,7 +122,7 @@ $ kubectl create -f fs-static-pvc.yaml
 persistentvolumeclaim/fs-static-pvc created
 ```
-**Note** deleting PV and PVC doesnot deleted the backend rbd image, user need to
+**Note** deleting PV and PVC does not removed the backend rbd image, user need to
 manually delete the rbd image if required
 ## CephFS static PVC
@@ -202,7 +202,7 @@ Below table explains the list of volume attributes can be set when creating a
 static CephFS PV
 | Attributes | Description | Required |
-| :----------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------: |
+| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
 | clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in configmap created duing ceph-csi deployment) | Yes |
 | fsName | CephFS filesystem name into which the subvolume should be created/present | Yes |
 | staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes |


@@ -271,7 +271,7 @@ var _ = Describe("RBD", func() {
 By("create a PVC and bind it to an app with normal user", func() {
 err := validateNormalUserPVCAccess(pvcPath, f)
 if err != nil {
-e2elog.Failf("failed to validate normal user pvc and application bidning with error %v", err)
+e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err)
 }
 // validate created backend rbd images
 validateRBDImageCount(f, 0)


@@ -85,7 +85,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
 pvcName = "pvc-name"
 namespace = f.UniqueName
 // minikube creates default class in cluster, we need to set dummy
-// storageclass on PV and PVC to avoid storageclass name missmatch
+// storageclass on PV and PVC to avoid storageclass name mismatch
 sc = "storage-class"
 )
@@ -176,7 +176,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
 pvcName = "pvc-name"
 namespace = f.UniqueName
 // minikube creates default storage class in cluster, we need to set dummy
-// storageclass on PV and PVC to avoid storageclass name missmatch
+// storageclass on PV and PVC to avoid storageclass name mismatch
 sc = "storage-class"
 secretName = "cephfs-static-pv-sc" // #nosec
 )


@@ -354,7 +354,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 // wait for application pod to come up after resize
 err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 if err != nil {
-e2elog.Failf("timout waiting for pod to be in running state with error %v", err)
+e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
 }
 // validate if resize is successful.
 err = checkDirSize(app, f, &opt, pvcExpandSize)


@@ -185,7 +185,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 // while cloning the volume the size is not populated properly to the new volume now.
 // it will be fixed in cephfs soon with the parentvolume size. Till then by below
 // resize we are making sure we return or satisfy the requested size by setting the size
-// explictly
+// explicitly
 err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
 if err != nil {
 purgeErr := purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions, false)


@@ -152,7 +152,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
 func parseTime(ctx context.Context, createTime string) (*timestamp.Timestamp, error) {
 tm := &timestamp.Timestamp{}
 layout := "2006-01-02 15:04:05.000000"
-// TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failng
+// TODO currently parsing of timestamp to time.ANSIC generate from ceph fs is failing
 var t time.Time
 t, err := time.Parse(layout, createTime)
 if err != nil {


@@ -66,7 +66,7 @@ func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetIn
 }, nil
 }
-// NodeGetCapabilities returns RPC unknow capability.
+// NodeGetCapabilities returns RPC unknown capability.
 func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
 util.TraceLog(ctx, "Using default NodeGetCapabilities")


@@ -110,7 +110,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
 }
 return true, nil
 }
-// as the temp clone doesnot exists,check snapshot exists on parent volume
+// as the temp clone does not exist,check snapshot exists on parent volume
 // snapshot name is same as temporary clone image
 snap.RbdImageName = tempClone.RbdImageName
 err = parentVol.checkSnapExists(snap)


@@ -545,7 +545,7 @@ func checkContentSource(ctx context.Context, req *csi.CreateVolumeRequest, cr *u
 if !errors.Is(err, ErrSnapNotFound) {
 return nil, nil, status.Error(codes.Internal, err.Error())
 }
-return nil, nil, status.Errorf(codes.NotFound, "%s snapshot doesnot exists", snapshotID)
+return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
 }
 return nil, rbdSnap, nil
 case *csi.VolumeContentSource_Volume:
@@ -564,7 +564,7 @@
 if !errors.Is(err, ErrImageNotFound) {
 return nil, nil, status.Error(codes.Internal, err.Error())
 }
-return nil, nil, status.Errorf(codes.NotFound, "%s image doesnot exists", volID)
+return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
 }
 return rbdvol, nil, nil
 }


@@ -55,7 +55,7 @@ func (e errorPair) Unwrap() error {
 }
 // JoinErrors combines two errors. Of the returned error, Is() follows the first
-// branch, Unwrap() folllows the second branch.
+// branch, Unwrap() follows the second branch.
 func JoinErrors(e1, e2 error) error {
 return errorPair{e1, e2}
 }


@@ -147,7 +147,7 @@ func GetKernelVersion() (string, error) {
 return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil
 }
-// KernelVersion holds kernel related informations.
+// KernelVersion holds kernel related information.
 type KernelVersion struct {
 Version int
 PatchLevel int


@@ -27,7 +27,7 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 // validate stagingpath exists
 ok := checkDirExists(req.GetStagingTargetPath())
 if !ok {
-return status.Errorf(codes.InvalidArgument, "staging path %s does not exists on node", req.GetStagingTargetPath())
+return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath())
 }
 return nil
 }


@@ -25,12 +25,12 @@ DriverInfo:
 RequiredMountOption:
 rw: {}
-# Optional list of access modes required for provisiong. Default is RWO
+# Optional list of access modes required for provisioning. Default is RWO
 # RequiredAcccessModes:
 # Map that represents the capabilities the driver supports
 Capabilities:
-# Data is persistest accross pod restarts
+# Data is persistest across pod restarts
 persistence: true
 # Volume ownership via fsGroup


@@ -30,12 +30,12 @@ DriverInfo:
 RequiredMountOption:
 rw: {}
-# Optional list of access modes required for provisiong. Default is RWO
+# Optional list of access modes required for provisioning. Default is RWO
 # RequiredAcccessModes:
 # Map that represents the capabilities the driver supports
 Capabilities:
-# Data is persistest accross pod restarts
+# Data is persistest across pod restarts
 persistence: true
 # Volume ownership via fsGroup


@@ -48,7 +48,7 @@ kubectl_retry() {
 ret=$(grep -cvw 'AlreadyExists' "${stderr}")
 if [ "${ret}" -eq 0 ]
 then
-# Succes! stderr is empty after removing all "AlreadyExists" lines.
+# Success! stderr is empty after removing all "AlreadyExists" lines.
 break
 fi
 fi