Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-09 16:00:22 +00:00)
cleanup: resolve nlreturn linter issues
The nlreturn linter requires a blank line before return and branch statements, except when the return is alone inside a statement group (such as an if statement), to increase code clarity. This commit addresses such issues.

Updates: #1586

Signed-off-by: Rakshith R <rar@redhat.com>
This commit is contained in: parent 5c016b4b94, commit 43f753760b
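For context, here is a minimal, hypothetical Go sketch (the function names are illustrative, not code from this commit) of the pattern the linter enforces and the exception the commit message describes:

package main

import "fmt"

// Before: nlreturn would flag this return because it directly follows
// another statement in the same block.
func rbdCaps(pool string) []string {
	caps := []string{"mon", "'allow r'"}
	caps = append(caps, fmt.Sprintf("osd 'profile rbd pool=%s'", pool))
	return caps // flagged: no blank line before the return
}

// After: a blank line separates the return from the preceding statements,
// which is the change repeated throughout the diff below.
func rbdCapsFixed(pool string) []string {
	caps := []string{"mon", "'allow r'"}
	caps = append(caps, fmt.Sprintf("osd 'profile rbd pool=%s'", pool))

	return caps
}

// Exception: a return that is the only statement in its block (for
// example, inside an if statement) needs no preceding blank line.
func check(err error) error {
	if err != nil {
		return err
	}

	return nil
}

func main() {
	fmt.Println(rbdCapsFixed("replicapool"), check(nil))
}

The linter is typically enabled through the project's golangci-lint configuration (an nlreturn entry under the enabled linters), which is presumably how issue #1586 tracks it; the diff below then only inserts blank lines (marked with +) before return and break statements.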
@@ -37,6 +37,7 @@ func rbdNodePluginCaps(pool, rbdNamespace string) []string {
	} else {
		caps = append(caps, fmt.Sprintf("osd 'profile rbd pool=%s namespace=%s'", pool, rbdNamespace))
	}
+
	return caps
}

@@ -50,6 +51,7 @@ func rbdProvisionerCaps(pool, rbdNamespace string) []string {
	} else {
		caps = append(caps, fmt.Sprintf("osd 'profile rbd pool=%s namespace=%s'", pool, rbdNamespace))
	}
+
	return caps
}

@@ -62,6 +64,7 @@ func cephFSNodePluginCaps() []string {
		"osd", "'allow rw tag cephfs *=*'",
		"mds", "'allow rw'",
	}
+
	return caps
}

@@ -71,6 +74,7 @@ func cephFSProvisionerCaps() []string {
		"mgr", "'allow rw'",
		"osd", "'allow rw tag cephfs metadata=*'",
	}
+
	return caps
}
@@ -83,11 +87,13 @@ func createCephUser(f *framework.Framework, user string, caps []string) (string,
	if stdErr != "" {
		return "", fmt.Errorf("failed to create user %s with error %v", cmd, stdErr)
	}
+
	return strings.TrimSpace(stdOut), nil
}

func deleteCephUser(f *framework.Framework, user string) error {
	cmd := fmt.Sprintf("ceph auth del client.%s", user)
	_, _, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
+
	return err
}
@@ -165,6 +165,7 @@ func validateSubvolumePath(f *framework.Framework, pvcName, pvcNamespace, fileSy
			subVolumePath,
			subVolumePathInPV)
	}
+
	return nil
}

@@ -354,6 +355,7 @@ var _ = Describe("cephfs", func() {
			fmt.Printf("Checking prefix on %s\n", subVol)
			if strings.HasPrefix(subVol.Name, volumeNamePrefix) {
				foundIt = true
+
				break
			}
		}
@@ -34,6 +34,7 @@ func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
	if stdOut != expectedGrpPath {
		return fmt.Errorf("error unexpected group path. Found: %s", stdOut)
	}
+
	return nil
}

@@ -84,6 +85,7 @@ func createCephfsStorageClass(
	}
	sc.Namespace = cephCSINamespace
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+
	return err
}

@@ -102,6 +104,7 @@ func createCephfsSecret(f *framework.Framework, secretName, userName, userKey st
	delete(sc.StringData, "userKey")
	sc.Namespace = cephCSINamespace
	_, err = f.ClientSet.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
+
	return err
}

@@ -110,6 +113,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
	pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
	if err != nil {
		e2elog.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
+
		return fmt.Errorf("failed to get pod: %w", err)
	}
	pvc, err := f.ClientSet.CoreV1().

@@ -117,6 +121,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
		Get(context.TODO(), pvcName, metav1.GetOptions{})
	if err != nil {
		e2elog.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
+
		return fmt.Errorf("failed to get pvc: %w", err)
	}
	cmd := fmt.Sprintf(

@@ -133,6 +138,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
	if stdErr != "" {
		e2elog.Logf("StdErr occurred: %s", stdErr)
	}
+
	return err
}

@@ -150,6 +156,7 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
	if stdErr != "" {
		return fmt.Errorf("error deleting backing volume %s %v", imageData.imageName, stdErr)
	}
+
	return nil
}

@@ -174,6 +181,7 @@ func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string)
	if err != nil {
		return subVols, err
	}
+
	return subVols, nil
}

@@ -187,6 +195,7 @@ func getSubvolumePath(f *framework.Framework, filesystem, subvolgrp, subvolume s
	if stdErr != "" {
		return "", fmt.Errorf("failed to getpath for subvolume %s with error %s", subvolume, stdErr)
	}
+
	return strings.TrimSpace(stdOut), nil
}

@@ -211,6 +220,7 @@ func getSnapName(snapNamespace, snapName string) (string, error) {
	snapID := snapIDRegex.FindString(*sc.Status.SnapshotHandle)
	snapshotName := fmt.Sprintf("csi-snap-%s", snapID)
	e2elog.Logf("snapshotName= %s", snapshotName)
+
	return snapshotName, nil
}

@@ -239,5 +249,6 @@ func deleteBackingCephFSSubvolumeSnapshot(
	if stdErr != "" {
		return fmt.Errorf("error deleting backing snapshot %s %v", snapshotName, stdErr)
	}
+
	return nil
}
@@ -21,6 +21,7 @@ func deleteConfigMap(pluginPath string) error {
	if err != nil {
		return err
	}
+
	return nil
}

@@ -120,5 +121,6 @@ func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpI
	if err != nil {
		return fmt.Errorf("failed to update configmap: %w", err)
	}
+
	return nil
}
@@ -104,5 +104,6 @@ func (vc *vaultConfig) getPassphrase(f *framework.Framework, key string) (string
		LabelSelector: "app=vault",
	}
	stdOut, stdErr := execCommandInPodAndAllowFail(f, cmd, cephCSINamespace, &opt)
+
	return strings.TrimSpace(stdOut), strings.TrimSpace(stdErr)
}
@@ -33,6 +33,7 @@ func logsCSIPods(label string, c clientset.Interface) {
	podList, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
	if err != nil {
		e2elog.Logf("failed to list pods with selector %s %v", label, err)
+
		return
	}

@@ -72,5 +73,6 @@ func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("fetched log contains \"Internal Error\": %q", string(logs))
	}
+
	return string(logs), err
}
@@ -37,8 +37,10 @@ func createNamespace(c kubernetes.Interface, name string) error {
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, err
		}
+
		return true, nil
	})
}

@@ -49,6 +51,7 @@ func deleteNamespace(c kubernetes.Interface, name string) error {
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("failed to delete namespace: %w", err)
	}
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {

@@ -59,8 +62,10 @@ func deleteNamespace(c kubernetes.Interface, name string) error {
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, err
		}
+
		return false, nil
	})
}

@@ -70,5 +75,6 @@ func replaceNamespaceInTemplate(filePath string) (string, error) {
	if err != nil {
		return "", err
	}
+
	return strings.ReplaceAll(string(read), "namespace: default", fmt.Sprintf("namespace: %s", cephCSINamespace)), nil
}
@@ -21,6 +21,7 @@ func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error
	for i := range nodes.Items {
		framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
	}
+
	return nil
}

@@ -32,6 +33,7 @@ func deleteNodeLabel(c kubernetes.Interface, labelKey string) error {
	for i := range nodes.Items {
		framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
	}
+
	return nil
}

@@ -43,6 +45,7 @@ func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) erro
	for i := range nodes.Items {
		framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
	}
+
	return nil
}
e2e/pod.go (28 changed lines)
@@ -24,14 +24,17 @@ func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string)
	ds, err := f.ClientSet.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonSetName, metav1.GetOptions{})
	if err != nil {
		e2elog.Logf("Error getting daemonsets with name %s in namespace %s", daemonSetName, ns)
+
		return "", err
	}
	s, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		e2elog.Logf("Error parsing %s daemonset selector in namespace %s", daemonSetName, ns)
+
		return "", err
	}
	e2elog.Logf("LabelSelector for %s daemonsets in namespace %s: %s", daemonSetName, ns, s.String())
+
	return s.String(), nil
}

@@ -50,6 +53,7 @@ func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, err
		}
		dNum := ds.Status.DesiredNumberScheduled

@@ -85,6 +89,7 @@ func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) e
				return false, nil
			}
			e2elog.Logf("deployment error: %v", err)
+
			return false, err
		}

@@ -100,6 +105,7 @@ func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) e
			deployment.Status.Replicas,
			deployment.Status.ReadyReplicas)
		reason = fmt.Sprintf("deployment status: %#v", deployment.Status.String())
+
		return false, nil
	})

@@ -109,6 +115,7 @@ func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) e
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %w", name, err)
	}
+
	return nil
}

@@ -130,8 +137,10 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
				}
			}
		}
+
		return "", "", errors.New("container name not found")
	}
+
	return podList.Items[0].Name, podList.Items[0].Spec.Containers[0].Name, nil
}

@@ -144,6 +153,7 @@ func getCommandInPodOpts(
	if err != nil {
		return framework.ExecOptions{}, err
	}
+
	return framework.ExecOptions{
		Command: cmd,
		PodName: pName,

@@ -192,6 +202,7 @@ func execCommandInDaemonsetPod(
		CaptureStdout: true,
		CaptureStderr: true,
	}
+
	return f.ExecWithOptions(podOpt)
}

@@ -201,6 +212,7 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.
	if len(podList.Items) == 0 {
		return podList.Items, fmt.Errorf("podlist for label '%s' in namespace %s is empty", opt.LabelSelector, ns)
	}
+
	return podList.Items, err
}

@@ -213,6 +225,7 @@ func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOpti
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
+
	return stdOut, stdErr, err
}

@@ -226,6 +239,7 @@ func execCommandInContainer(
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
+
	return stdOut, stdErr, err
}

@@ -241,6 +255,7 @@ func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, stri
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
+
	return stdOut, stdErr, err
}

@@ -253,6 +268,7 @@ func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *met
	if err != nil {
		e2elog.Logf("command %s failed: %v", c, err)
	}
+
	return stdOut, stdErr
}

@@ -264,6 +280,7 @@ func loadApp(path string) (*v1.Pod, error) {
	for i := range app.Spec.Containers {
		app.Spec.Containers[i].ImagePullPolicy = v1.PullIfNotPresent
	}
+
	return &app, nil
}

@@ -272,6 +289,7 @@ func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
	if err != nil {
		return fmt.Errorf("failed to create app: %w", err)
	}
+
	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout, noError)
}

@@ -280,6 +298,7 @@ func createAppErr(c kubernetes.Interface, app *v1.Pod, timeout int, errString st
	if err != nil {
		return err
	}
+
	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout, errString)
}

@@ -287,12 +306,14 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Running state", name)
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get app: %w", err)
		}
		switch pod.Status.Phase {

@@ -310,6 +331,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
			}
			if strings.Contains(events.String(), expectedError) {
				e2elog.Logf("Expected Error %q found successfully", expectedError)
+
				return true, err
			}
		}

@@ -320,6 +342,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
				pod.Status.Phase,
				int(time.Since(start).Seconds()))
		}
+
		return false, nil
	})
}

@@ -332,6 +355,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
	}
	start := time.Now()
	e2elog.Logf("Waiting for pod %v to be deleted", name)
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {

@@ -342,8 +366,10 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
				return true, nil
			}
			e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+
			return false, fmt.Errorf("failed to get app: %w", err)
		}
+
		return false, nil
	})
}

@@ -353,6 +379,7 @@ func deletePodWithLabel(label, ns string, skipNotFound bool) error {
	if err != nil {
		e2elog.Logf("failed to delete pod %v", err)
	}
+
	return err
}

@@ -369,5 +396,6 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op
	// extract checksum from sha512sum output.
	checkSum := strings.Split(sha512sumOut, " ")[0]
	e2elog.Logf("Calculated checksum %s", checkSum)
+
	return checkSum, nil
}
e2e/pvc.go (16 changed lines)
@@ -23,6 +23,7 @@ func loadPVC(path string) (*v1.PersistentVolumeClaim, error) {
	if err != nil {
		return nil, err
	}
+
	return pvc, err
}

@@ -53,6 +54,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
			if apierrs.IsNotFound(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get pvc: %w", err)
		}

@@ -68,6 +70,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
			if apierrs.IsNotFound(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get pv: %w", err)
		}
		err = e2epv.WaitOnPVandPVC(

@@ -79,6 +82,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
		if err != nil {
			return false, fmt.Errorf("failed to wait for the pv and pvc to bind: %w", err)
		}
+
		return true, nil
	})
}

@@ -92,6 +96,7 @@ func createPVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
	if err != nil {
		return fmt.Errorf("failed to create pv: %w", err)
	}
+
	return err
}

@@ -125,6 +130,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
				// FIXME: see https://github.com/ceph/ceph-csi/issues/1874
				e2elog.Logf("PVC %s is in a weird state: %s", pvcToDelete.Name, pvcToDelete.String())
			}
+
			return false, nil
		}
		if isRetryableAPIError(err) {

@@ -145,6 +151,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v

	start = time.Now()
	pvToDelete := pv
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PV is deleted.
		e2elog.Logf(

@@ -179,6 +186,7 @@ func getPVCAndPV(
	if err != nil {
		return pvc, nil, fmt.Errorf("failed to get PV: %w", err)
	}
+
	return pvc, pv, nil
}

@@ -203,6 +211,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
		return fmt.Errorf("delete of PVC %v failed: %w", name, err)
	}
	start := time.Now()
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PVC is really deleted.
		e2elog.Logf(

@@ -252,6 +261,7 @@ func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1
	if err != nil {
		return nil, fmt.Errorf("failed to get pv: %w", err)
	}
+
	return pv, nil
}

@@ -289,6 +299,7 @@ func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolum
			return errors.New("unexpected key in node selector terms found in PV")
		}
	}
+
	return nil
}

@@ -303,14 +314,17 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i

	// retry as kubelet does not immediately have the metrics available
	timeout := time.Duration(t) * time.Minute
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
		if err != nil {
			e2elog.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
+
			return false, nil
		}
		if stdOut == "" {
			e2elog.Logf("no metrics received from kublet on IP %s", kubelet)
+
			return false, nil
		}

@@ -324,11 +338,13 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
			if strings.Contains(line, namespace) && strings.Contains(line, name) {
				// TODO: validate metrics if possible
				e2elog.Logf("found metrics for pvc %s/%s: %s", pvc.Namespace, pvc.Name, line)
+
				return true, nil
			}
		}

		e2elog.Logf("no metrics found for pvc %s/%s", pvc.Namespace, pvc.Name)
+
		return false, nil
	})
}
@@ -522,9 +522,11 @@ var _ = Describe("RBD", func() {
					reason = fmt.Sprintf("failed to run ps cmd : %v, stdErr: %v", err, stdErr)
				}
				e2elog.Logf("%s", reason)
+
				return false, nil
			}
			e2elog.Logf("attach command running after restart, runningAttachCmd: %v", runningAttachCmd)
+
			return true, nil
		})

@@ -1213,6 +1215,7 @@ var _ = Describe("RBD", func() {
			fmt.Printf("Checking prefix on %s\n", imgName)
			if strings.HasPrefix(imgName, volumeNamePrefix) {
				foundIt = true
+
				break
			}
		}
@@ -30,6 +30,7 @@ func imageSpec(pool, image string) string {
	if radosNamespace != "" {
		return pool + "/" + radosNamespace + "/" + image
	}
+
	return pool + "/" + image
}

@@ -37,6 +38,7 @@ func rbdOptions(pool string) string {
	if radosNamespace != "" {
		return "--pool=" + pool + " --namespace " + radosNamespace
	}
+
	return "--pool=" + pool
}

@@ -92,6 +94,7 @@ func createRBDStorageClass(
	}
	sc.ReclaimPolicy = &policy
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+
	return err
}

@@ -133,6 +136,7 @@ func createRadosNamespace(f *framework.Framework) error {
			return fmt.Errorf("error creating rbd namespace %v", stdErr)
		}
	}
+
	return nil
}

@@ -149,6 +153,7 @@ func createRBDSecret(f *framework.Framework, secretName, userName, userKey strin
	sc.StringData["userKey"] = userKey
	sc.Namespace = cephCSINamespace
	_, err = f.ClientSet.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
+
	return err
}

@@ -184,6 +189,7 @@ func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (
		csiVolumeHandle: pv.Spec.CSI.VolumeHandle,
		pvName:          pv.Name,
	}
+
	return imageData, nil
}

@@ -196,6 +202,7 @@ func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string,
	if stdErr != "" {
		return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
	}
+
	return strings.TrimSpace(stdOut), nil
}

@@ -262,6 +269,7 @@ func logErrors(f *framework.Framework, msg string, wgErrs []error) int {
			failures++
		}
	}
+
	return failures
}

@@ -388,6 +396,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
	validateRBDImageCount(f, 0, snapshotPool)
	validateRBDImageCount(f, 0, defaultRBDPool)
	validateRBDImageCount(f, 0, destImagePool)
+
	return nil
}

@@ -427,6 +436,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f
			return fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut)
		}
	}
+
	return nil
}

@@ -457,6 +467,7 @@ func isThickPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.P
	if err != nil {
		return fmt.Errorf("failed to validate thick image: %w", err)
	}
+
	return nil
}

@@ -474,6 +485,7 @@ func validateThickImageMetadata(f *framework.Framework, pvc *v1.PersistentVolume
	if thickState == "" {
		return fmt.Errorf("image metadata is set for %s", rbdImageSpec)
	}
+
	return nil
}

@@ -518,6 +530,7 @@ func listRBDImages(f *framework.Framework, pool string) ([]string, error) {
	if err != nil {
		return imgInfos, err
	}
+
	return imgInfos, nil
}

@@ -529,6 +542,7 @@ func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim

	cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+
	return err
}

@@ -586,6 +600,7 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla

	cmd := fmt.Sprintf("rbd sparsify %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+
	return err
}

@@ -615,6 +630,7 @@ func deletePool(name string, cephfs bool, f *framework.Framework) error {
			return err
		}
	}
+
	return nil
}

@@ -632,6 +648,7 @@ func createPool(f *framework.Framework, name string) error {
	// ceph osd pool set replicapool size 1
	cmd = fmt.Sprintf("ceph osd pool set %s size %d --yes-i-really-mean-it", name, size)
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+
	return err
}

@@ -876,6 +893,7 @@ func listRBDImagesInTrash(f *framework.Framework, poolName string) ([]trashInfo,
	if err != nil {
		return trashInfos, err
	}
+
	return trashInfos, nil
}

@@ -892,11 +910,13 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
		}
		errReason = fmt.Errorf("found %d images found in trash. Image details %v", len(imagesInTrash), imagesInTrash)
		e2elog.Logf(errReason.Error())
+
		return false, nil
	})

	if errors.Is(err, wait.ErrWaitTimeout) {
		err = errReason
	}
+
	return err
}
@@ -36,6 +36,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s

	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Resized state", pvc)
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", updatedPVC.Name, int(time.Since(start).Seconds()))
		updatedPVC, err = c.CoreV1().

@@ -46,6 +47,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get pvc: %w", err)
		}
		pvcConditions := updatedPVC.Status.Conditions

@@ -62,8 +64,10 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
				"current size in status %v,expected size %v",
				updatedPVC.Status.Capacity[v1.ResourceStorage],
				resource.MustParse(size))
+
			return false, nil
		}
+
		return true, nil
	})
}

@@ -143,16 +147,19 @@ func resizePVCAndValidateSize(pvcPath, appPath string, f *framework.Framework) e
		}
	}
	err = deletePVCAndApp("", f, resizePvc, app)
+
	return err
}

func checkDirSize(app *v1.Pod, f *framework.Framework, opt *metav1.ListOptions, size string) error {
	cmd := getDirSizeCheckCmd(app.Spec.Containers[0].VolumeMounts[0].MountPath)
+
	return checkAppMntSize(f, opt, size, cmd, app.Namespace, deployTimeout)
}

func checkDeviceSize(app *v1.Pod, f *framework.Framework, opt *metav1.ListOptions, size string) error {
	cmd := getDeviceSizeCheckCmd(app.Spec.Containers[0].VolumeDevices[0].DevicePath)
+
	return checkAppMntSize(f, opt, size, cmd, app.Namespace, deployTimeout)
}

@@ -176,6 +183,7 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
		}
		if stdErr != "" {
			e2elog.Logf("failed to execute command in app pod %v", stdErr)
+
			return false, nil
		}
		s := resource.MustParse(strings.TrimSpace(output))

@@ -190,8 +198,10 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
		}
		if actualSize != expectedSize {
			e2elog.Logf("expected size %s found %s information", size, output)
+
			return false, nil
		}
+
		return true, nil
	})
}
@@ -20,6 +20,7 @@ func getSnapshotClass(path string) snapapi.VolumeSnapshotClass {
	sc := snapapi.VolumeSnapshotClass{}
	err := unmarshal(path, &sc)
	Expect(err).Should(BeNil())
+
	return sc
}

@@ -27,6 +28,7 @@ func getSnapshot(path string) snapapi.VolumeSnapshot {
	sc := snapapi.VolumeSnapshot{}
	err := unmarshal(path, &sc)
	Expect(err).Should(BeNil())
+
	return sc
}

@@ -39,6 +41,7 @@ func newSnapshotClient() (*snapclient.SnapshotV1Client, error) {
	if err != nil {
		return nil, fmt.Errorf("error creating snapshot client: %w", err)
	}
+
	return c, err
}

@@ -74,6 +77,7 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
			if apierrs.IsNotFound(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get volumesnapshot: %w", err)
		}
		if snaps.Status == nil || snaps.Status.ReadyToUse == nil {

@@ -83,6 +87,7 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
			return true, nil
		}
		e2elog.Logf("snapshot %s in %v state", snap.Name, *snaps.Status.ReadyToUse)
+
		return false, nil
	})
}

@@ -149,6 +154,7 @@ func createRBDSnapshotClass(f *framework.Framework) error {
		return err
	}
	_, err = sclient.VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+
	return err
}

@@ -160,6 +166,7 @@ func deleteRBDSnapshotClass() error {
	if err != nil {
		return err
	}
+
	return sclient.VolumeSnapshotClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
}

@@ -185,6 +192,7 @@ func createCephFSSnapshotClass(f *framework.Framework) error {
	if err != nil {
		return fmt.Errorf("failed to create volumesnapshotclass: %w", err)
	}
+
	return err
}

@@ -207,5 +215,6 @@ func getVolumeSnapshotContent(namespace, snapshotName string) (*snapapi.VolumeSn
	if err != nil {
		return nil, fmt.Errorf("failed to get volumesnapshotcontent: %w", err)
	}
+
	return volumeSnapshotContent, nil
}
@@ -184,6 +184,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI

	cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool))
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+
	return err
}
@@ -22,6 +22,7 @@ func upgradeCSI(version string) error {
	if err != nil {
		return fmt.Errorf("unable to switch directory : %w", err)
	}
+
	return nil
}

@@ -38,5 +39,6 @@ func upgradeAndDeployCSI(version, testtype string) error {
	default:
		return errors.New("incorrect test type, can be cephfs/rbd")
	}
+
	return nil
}
e2e/utils.go (26 changed lines)
@@ -83,12 +83,14 @@ func getMons(ns string, c kubernetes.Interface) ([]string, error) {
			svcList.Items[i].Spec.Ports[0].Port)
		services = append(services, s)
	}
+
	return services, nil
}

func getStorageClass(path string) (scv1.StorageClass, error) {
	sc := scv1.StorageClass{}
	err := unmarshal(path, &sc)
+
	return sc, err
}

@@ -102,6 +104,7 @@ func getSecret(path string) (v1.Secret, error) {
			return sc, err
		}
	}
+
	return sc, nil
}

@@ -114,6 +117,7 @@ func deleteResource(scPath string) error {
	if err != nil {
		e2elog.Logf("failed to delete %s %v", scPath, err)
	}
+
	return err
}

@@ -128,6 +132,7 @@ func unmarshal(fileName string, obj interface{}) error {
	}

	err = json.Unmarshal(data, obj)
+
	return err
}

@@ -149,6 +154,7 @@ func createPVCAndApp(
		return err
	}
	err = createApp(f.ClientSet, app, deployTimeout)
+
	return err
}

@@ -166,6 +172,7 @@ func deletePVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolu
		return err
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+
	return err
}

@@ -199,6 +206,7 @@ func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) e
		return err
	}
	err = deletePVCAndApp("", f, pvc, app)
+
	return err
}

@@ -214,6 +222,7 @@ func getMountType(appName, appNamespace, mountPath string, f *framework.Framewor
	if stdErr != "" {
		return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
	}
+
	return strings.TrimSpace(stdOut), nil
}

@@ -306,6 +315,7 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
	}

	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+
	return err
}

@@ -393,6 +403,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
	}

	err = deletePVCAndApp("", f, pvc, app)
+
	return err
}

@@ -429,6 +440,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
		}
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+
	return err
}

@@ -471,6 +483,7 @@ func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFla
	}

	err = deletePVCAndApp("", f, pvc, app)
+
	return err
}

@@ -481,6 +494,7 @@ func addTopologyDomainsToDSYaml(template, labels string) string {

func oneReplicaDeployYaml(template string) string {
	re := regexp.MustCompile(`(\s+replicas:) \d+`)
+
	return re.ReplaceAllString(template, `$1 1`)
}

@@ -494,12 +508,14 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.
	err := writeDataInPod(app, opt, f)
	if err != nil {
		e2elog.Logf("failed to write data in the pod with error %v", err)
+
		return "", err
	}

	checkSum, err := calculateSHA512sum(f, app, filePath, opt)
	if err != nil {
		e2elog.Logf("failed to calculate checksum with error %v", err)
+
		return checkSum, err
	}

@@ -507,6 +523,7 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.
	if err != nil {
		e2elog.Failf("failed to delete pod with error %v", err)
	}
+
	return checkSum, nil
}

@@ -1131,6 +1148,7 @@ func validateController(f *framework.Framework, pvcPath, appPath, scPath string)
	if err != nil {
		return err
	}
+
	return deleteResource(rbdExamplePath + "storageclass.yaml")
}

@@ -1172,6 +1190,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
			if isRetryableAPIError(err) {
				return false, nil
			}
+
			return false, fmt.Errorf("failed to get Job: %w", err)
		}

@@ -1183,6 +1202,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
		e2elog.Logf(
			"Job %s/%s has not completed yet (%d seconds elapsed)",
			ns, job, int(time.Since(start).Seconds()))
+
		return false, nil
	})
}

@@ -1211,6 +1231,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
	timeout := time.Duration(t) * time.Minute
	e2elog.Logf("waiting for kubectl (%s) to finish", action)
	start := time.Now()
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := framework.RunKubectlInput(namespace, data, string(action), "-f", "-")
		if err != nil {

@@ -1221,8 +1242,10 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
				"will run kubectl (%s) again (%d seconds elapsed)",
				action,
				int(time.Since(start).Seconds()))
+
			return false, fmt.Errorf("failed to run kubectl: %w", err)
		}
+
		return true, nil
	})
}

@@ -1234,6 +1257,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
	timeout := time.Duration(t) * time.Minute
	e2elog.Logf("waiting for kubectl (%s -f %q) to finish", action, filename)
	start := time.Now()
+
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := framework.RunKubectl(namespace, string(action), "-f", filename)
		if err != nil {

@@ -1245,8 +1269,10 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
				action,
				filename,
				int(time.Since(start).Seconds()))
+
			return false, fmt.Errorf("failed to run kubectl: %w", err)
		}
+
		return true, nil
	})
}
@@ -27,12 +27,14 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
+
		return 0, err
	}

	volumes, err := fsa.EnumerateVolumes()
	if err != nil {
		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem ID for %s:", vo.FsName, err)
+
		return 0, err
	}

@@ -43,6 +45,7 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
	}

	util.ErrorLog(ctx, "failed to list volume %s", vo.FsName)
+
	return 0, ErrVolumeNotFound
}

@@ -50,12 +53,14 @@ func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
+
		return "", err
	}

	fsPoolInfos, err := fsa.ListFileSystems()
	if err != nil {
		util.ErrorLog(ctx, "could not list filesystems, can not fetch metadata pool for %s:", vo.FsName, err)
+
		return "", err
	}

@@ -72,12 +77,14 @@ func (vo *volumeOptions) getFsName(ctx context.Context) (string, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
+
		return "", err
	}

	volumes, err := fsa.EnumerateVolumes()
	if err != nil {
		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem name for ID %d:", vo.FscID, err)
+
		return "", err
	}
@@ -56,6 +56,7 @@ func (cs cephFSCloneState) toError() error {
	case cephFSCloneFailed:
		return ErrCloneFailed
	}
+
	return nil
}

@@ -64,6 +65,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
	err := parentvolOpt.createSnapshot(ctx, snapshotID, volID)
	if err != nil {
		util.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
+
		return err
	}
	var (

@@ -100,29 +102,34 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
	protectErr = parentvolOpt.protectSnapshot(ctx, snapshotID, volID)
	if protectErr != nil {
		util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
+
		return protectErr
	}

	cloneErr = parentvolOpt.cloneSnapshot(ctx, volID, snapshotID, cloneID, volOpt)
	if cloneErr != nil {
		util.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
+
		return cloneErr
	}

	cloneState, cloneErr := volOpt.getCloneState(ctx, cloneID)
	if cloneErr != nil {
		util.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
+
		return cloneErr
	}

	if cloneState != cephFSCloneComplete {
		util.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
+
		return cloneState.toError()
	}
	// This is a work around to fix sizing issue for cloned images
	err = volOpt.resizeVolume(ctx, cloneID, volOpt.Size)
	if err != nil {
		util.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
+
		return err
	}
	// As we completed clone, remove the intermediate snap

@@ -132,13 +139,16 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
		// ahead with deletion
		if !errors.Is(err, ErrSnapProtectionExist) {
			util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
+
			return err
		}
	}
	if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
		util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
+
		return err
	}
+
	return nil
}

@@ -154,6 +164,7 @@ func cleanupCloneFromSubvolumeSnapshot(
		if errors.Is(err, ErrSnapNotFound) {
			return nil
		}
+
		return err
	}

@@ -161,14 +172,17 @@
		err = parentVolOpt.unprotectSnapshot(ctx, snapShotID, volID)
		if err != nil {
			util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
+
			return err
		}
	}
	err = parentVolOpt.deleteSnapshot(ctx, snapShotID, volID)
	if err != nil {
		util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
+
		return err
	}
+
	return nil
}

@@ -201,6 +215,7 @@ func createCloneFromSnapshot(
	cloneState, err := volOptions.getCloneState(ctx, volumeID(vID.FsSubvolName))
	if err != nil {
		util.ErrorLog(ctx, "failed to get clone state: %v", err)
+
		return err
	}

@@ -213,8 +228,10 @@
	err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
	if err != nil {
		util.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
+
		return err
	}
+
	return nil
}

@@ -227,12 +244,14 @@ func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cep
			vo.FsName,
			string(volID),
			err)
+
		return cephFSCloneError, err
	}

	cs, err := fsa.CloneStatus(vo.FsName, vo.SubvolumeGroup, string(volID))
	if err != nil {
		util.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
+
		return cephFSCloneError, err
	}
@@ -60,6 +60,7 @@ func (cs *ControllerServer) createBackingVolume(
	if sID != nil {
		if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
			util.ErrorLog(ctx, err.Error())
+
			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)

@@ -67,13 +68,16 @@
		err = createCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
		if err != nil {
			util.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
+
			return err
		}
+
		return err
	}
	if parentVolOpt != nil {
		if err = cs.OperationLocks.GetCloneLock(pvID.VolumeID); err != nil {
			util.ErrorLog(ctx, err.Error())
+
			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)

@@ -85,15 +89,19 @@
			parentVolOpt)
		if err != nil {
			util.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
+
			return err
		}
+
		return nil
	}

	if err = createVolume(ctx, volOptions, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
		util.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
+
		return status.Error(codes.Internal, err.Error())
	}
+
	return nil
}

@@ -113,8 +121,10 @@ func checkContentSource(
		if errors.Is(err, ErrSnapNotFound) {
			return nil, nil, nil, status.Error(codes.NotFound, err.Error())
		}
+
		return nil, nil, nil, status.Error(codes.Internal, err.Error())
	}
+
	return volOpt, nil, sid, nil
	case *csi.VolumeContentSource_Volume:
		// Find the volume using the provided VolumeID

@@ -124,11 +134,13 @@
		if !errors.Is(err, ErrVolumeNotFound) {
			return nil, nil, nil, status.Error(codes.NotFound, err.Error())
		}
+
		return nil, nil, nil, status.Error(codes.Internal, err.Error())
	}
+
	return parentVol, pvID, nil, nil
	}

	return nil, nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source %v", volumeSource)
}

@@ -139,6 +151,7 @@ func (cs *ControllerServer) CreateVolume(
	req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	if err := cs.validateCreateVolumeRequest(req); err != nil {
		util.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)
+
		return nil, err
	}

@@ -149,6 +162,7 @@
	cr, err := util.NewAdminCredentials(secret)
	if err != nil {
		util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
+
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()

@@ -156,6 +170,7 @@
	// Existence and conflict checks
	if acquired := cs.VolumeLocks.TryAcquire(requestName); !acquired {
		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, requestName)
+
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, requestName)
	}
	defer cs.VolumeLocks.Release(requestName)

@@ -163,6 +178,7 @@
	volOptions, err := newVolumeOptions(ctx, requestName, req, cr)
	if err != nil {
		util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
+
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer volOptions.Destroy()

@@ -185,6 +201,7 @@
		if isCloneRetryError(err) {
			return nil, status.Error(codes.Aborted, err.Error())
		}
+
		return nil, status.Error(codes.Internal, err.Error())
	}
	// TODO return error message if requested vol size greater than found volume return error

@@ -211,6 +228,7 @@
				requestName, errUndo)
		}
		util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(vID.FsSubvolName), err)
+
		return nil, status.Error(codes.Internal, err.Error())
	}
	}

@@ -231,6 +249,7 @@
			},
		}
	}
+
	return &csi.CreateVolumeResponse{Volume: volume}, nil
}

@@ -258,6 +277,7 @@
		if isCloneRetryError(err) {
			return nil, status.Error(codes.Aborted, err.Error())
		}
+
		return nil, err
	}

@@ -273,10 +293,12 @@
			// set err=nil so that when we get the request again we can get
			// the subvolume info.
			err = nil
+
			return nil, status.Error(codes.Internal, purgeErr.Error())
		}
	}
	util.ErrorLog(ctx, "failed to get subvolume path %s: %v", vID.FsSubvolName, err)
+
	return nil, status.Error(codes.Internal, err.Error())
	}

@@ -299,6 +321,7 @@
			},
		}
	}
+
	return &csi.CreateVolumeResponse{Volume: volume}, nil
}
@ -308,6 +331,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
|
||||
if err := cs.validateDeleteVolumeRequest(); err != nil {
|
||||
util.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -317,6 +341,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
// lock out parallel delete operations
|
||||
if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
|
||||
}
|
||||
defer cs.VolumeLocks.Release(string(volID))
|
||||
@ -324,6 +349,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
// lock out volumeID for clone and expand operation
|
||||
if err := cs.OperationLocks.GetDeleteLock(req.GetVolumeId()); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId())
|
||||
@ -335,6 +361,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
// need to worry about deleting subvolume or omap data, return success
|
||||
if errors.Is(err, util.ErrPoolNotFound) {
|
||||
util.WarningLog(ctx, "failed to get backend volume for %s: %v", string(volID), err)
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
|
||||
@ -362,6 +389,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
if err = undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
defer volOptions.Destroy()
|
||||
@ -377,6 +405,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
cr, err := util.NewAdminCredentials(secrets)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
defer cr.DeleteCredentials()
|
||||
@ -412,6 +441,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
|
||||
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &csi.ValidateVolumeCapabilitiesResponse{
|
||||
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
|
||||
VolumeCapabilities: req.VolumeCapabilities,
|
||||
@ -425,6 +455,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
if err := cs.validateExpandVolumeRequest(req); err != nil {
|
||||
util.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -434,6 +465,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
// lock out parallel delete operations
|
||||
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
}
|
||||
defer cs.VolumeLocks.Release(volID)
|
||||
@ -441,6 +473,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
// lock out volumeID for clone and delete operation
|
||||
if err := cs.OperationLocks.GetExpandLock(volID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
defer cs.OperationLocks.ReleaseExpandLock(volID)
|
||||
@ -454,6 +487,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
defer volOptions.Destroy()
|
||||
@ -462,6 +496,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
|
||||
if err = volOptions.resizeVolume(ctx, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -496,12 +531,14 @@ func (cs *ControllerServer) CreateSnapshot(
// Existence and conflict checks
if acquired := cs.SnapshotLocks.TryAcquire(requestName); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, requestName)

return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, requestName)
}
defer cs.SnapshotLocks.Release(requestName)

if err = cs.OperationLocks.GetSnapshotCreateLock(sourceVolID); err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}

@ -512,12 +549,14 @@ func (cs *ControllerServer) CreateSnapshot(
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
util.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)

return nil, status.Error(codes.NotFound, err.Error())
}

if errors.Is(err, ErrVolumeNotFound) {
return nil, status.Error(codes.NotFound, err.Error())
}

return nil, status.Error(codes.Internal, err.Error())
}
defer parentVolOptions.Destroy()
@ -538,6 +577,7 @@ func (cs *ControllerServer) CreateSnapshot(
// lock out parallel snapshot create operations
if acquired := cs.VolumeLocks.TryAcquire(sourceVolID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, sourceVolID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
}
defer cs.VolumeLocks.Release(sourceVolID)
@ -569,6 +609,7 @@ func (cs *ControllerServer) CreateSnapshot(
requestName, errDefer)
}
}

return nil, status.Error(codes.Internal, err.Error())
}

@ -613,6 +654,7 @@ func (cs *ControllerServer) CreateSnapshot(
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: info.BytesQuota,
@ -631,6 +673,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
err := volOpt.createSnapshot(ctx, snapID, volID)
if err != nil {
util.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)

return snap, err
}
defer func() {
@ -644,6 +687,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
snap, err = volOpt.getSnapshotInfo(ctx, snapID, volID)
if err != nil {
util.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)

return snap, fmt.Errorf("failed to get snapshot info for snapshot:%s", snapID)
}
var t *timestamp.Timestamp
@ -656,6 +700,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
if err != nil {
util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
}

return snap, err
}

@ -663,6 +708,7 @@ func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.Cr
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
util.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))

return err
}

@ -685,6 +731,7 @@ func (cs *ControllerServer) DeleteSnapshot(
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
util.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))

return nil, err
}

@ -700,6 +747,7 @@ func (cs *ControllerServer) DeleteSnapshot(

if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
}
defer cs.SnapshotLocks.Release(snapshotID)
@ -707,6 +755,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// lock out snapshotID for restore operation
if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
@ -718,6 +767,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// if error is ErrPoolNotFound, the pool is already deleted, we don't
// need to worry about deleting snapshot or omap data, return success
util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

return &csi.DeleteSnapshotResponse{}, nil
case errors.Is(err, util.ErrKeyNotFound):
// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
@ -729,8 +779,10 @@ func (cs *ControllerServer) DeleteSnapshot(
if err != nil {
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)

return nil, status.Error(codes.Internal, err.Error())
}

return &csi.DeleteSnapshotResponse{}, nil
case errors.Is(err, ErrVolumeNotFound):
// if the error is ErrVolumeNotFound, the subvolume is already deleted
@ -740,8 +792,10 @@ func (cs *ControllerServer) DeleteSnapshot(
if err != nil {
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)

return nil, status.Error(codes.Internal, err.Error())
}

return &csi.DeleteSnapshotResponse{}, nil
default:
return nil, status.Error(codes.Internal, err.Error())
@ -753,6 +807,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// name
if acquired := cs.SnapshotLocks.TryAcquire(sid.RequestName); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, sid.RequestName)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sid.RequestName)
}
defer cs.SnapshotLocks.Release(sid.RequestName)
@ -774,6 +829,7 @@ func (cs *ControllerServer) DeleteSnapshot(
if err != nil {
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err)

return nil, status.Error(codes.Internal, err.Error())
}

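Every hunk above has the same shape: a statement (usually a log call) precedes the return, so nlreturn asks for a separating blank line. A minimal sketch of the rule in its fixed form, using a hypothetical lookup helper — cache, errNotFound and the messages are illustrative, not from this codebase:

package example

import (
	"errors"
	"log"
)

var cache = map[string]string{"a": "1"}

var errNotFound = errors.New("not found")

// lookup shows the corrected form: nlreturn requires a blank line before a
// return that follows another statement in the same block.
func lookup(key string) (string, error) {
	v, ok := cache[key]
	if !ok {
		log.Printf("cache miss for %q", key)

		return "", errNotFound
	}

	return v, nil
}

Without the two blank lines, nlreturn reports "return with no blank line before" on both return statements.
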
@ -97,8 +97,10 @@ func checkVolExists(ctx context.Context,
}
err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)

return nil, err
}

return nil, err
}
if cloneState == cephFSCloneInprogress {
@ -111,6 +113,7 @@ func checkVolExists(ctx context.Context,
err = volOptions.purgeVolume(ctx, volumeID(vid.FsSubvolName), true)
if err != nil {
util.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)

return nil, err
}
if pvID != nil {
@ -124,6 +127,7 @@ func checkVolExists(ctx context.Context,
}
err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)

return nil, err
}
if cloneState != cephFSCloneComplete {
@ -147,8 +151,10 @@ func checkVolExists(ctx context.Context,
}
err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)

return nil, err
}

return nil, err
}

@ -327,6 +333,7 @@ func undoSnapReservation(

err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSnapshotName, snapName)

return err
}

@ -374,8 +381,10 @@ func checkSnapExists(
if errors.Is(err, ErrSnapNotFound) {
err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, snapID, snap.RequestName)

return nil, nil, err
}

return nil, nil, err
}

@ -384,6 +393,7 @@ func checkSnapExists(
err = volOptions.deleteSnapshot(ctx, volumeID(snapID), volumeID(parentSubVolName))
if err != nil {
util.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)

return
}
err = j.UndoReservation(ctx, volOptions.MetadataPool,

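The fsjournal hunks also show the exception named in the commit message: a return that is the only statement in its group is left alone. A small sketch of both cases together (validate and the error text are hypothetical):

package example

import "errors"

// validate contrasts the two cases: a lone return in a guard needs no
// blank line, while the final return follows the if block and does.
func validate(name string) error {
	if name == "" {
		return errors.New("name must not be empty") // sole statement: not flagged
	}

	return nil // preceded by another statement group, so the blank line is required
}

Note also the bare `return` inside the deferred cleanup above: naked returns and returns inside defer closures are treated exactly the same way by the linter.
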
@ -82,6 +82,7 @@ func (ns *NodeServer) NodeStageVolume(

if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
@ -114,11 +115,13 @@ func (ns *NodeServer) NodeStageVolume(
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
util.ErrorLog(ctx, "stat failed: %v", err)

return nil, status.Error(codes.Internal, err.Error())
}

if isMnt {
util.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)

return &csi.NodeStageVolumeResponse{}, nil
}

@ -139,6 +142,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
cr, err := getCredentialsForVolume(volOptions, req)
if err != nil {
util.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)

return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
@ -146,6 +150,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
m, err := newMounter(volOptions)
if err != nil {
util.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)

return status.Error(codes.Internal, err.Error())
}

@ -176,6 +181,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
"failed to mount volume %s: %v Check dmesg logs if required.",
volID,
err)

return status.Error(codes.Internal, err.Error())
}
if !csicommon.MountOptionContains(kernelMountOptions, readOnly) &&
@ -198,9 +204,11 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
volID,
uErr)
}

return status.Error(codes.Internal, err.Error())
}
}

return nil
}

@ -222,6 +230,7 @@ func (ns *NodeServer) NodePublishVolume(

if err := util.CreateMountPoint(targetPath); err != nil {
util.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -236,11 +245,13 @@ func (ns *NodeServer) NodePublishVolume(
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
util.ErrorLog(ctx, "stat failed: %v", err)

return nil, status.Error(codes.Internal, err.Error())
}

if isMnt {
util.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)

return &csi.NodePublishVolumeResponse{}, nil
}

@ -248,6 +259,7 @@ func (ns *NodeServer) NodePublishVolume(

if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
util.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -272,14 +284,17 @@ func (ns *NodeServer) NodeUnpublishVolume(
if os.IsNotExist(err) {
// targetPath has already been deleted
util.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)

return &csi.NodeUnpublishVolumeResponse{}, nil
}

return nil, status.Error(codes.Internal, err.Error())
}
if !isMnt {
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.NodeUnpublishVolumeResponse{}, nil
}

@ -310,6 +325,7 @@ func (ns *NodeServer) NodeUnstageVolume(
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
@ -321,8 +337,10 @@ func (ns *NodeServer) NodeUnstageVolume(
if os.IsNotExist(err) {
// targetPath has already been deleted
util.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)

return &csi.NodeUnstageVolumeResponse{}, nil
}

return nil, status.Error(codes.Internal, err.Error())
}
if !isMnt {
@ -370,6 +388,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
targetPath := req.GetVolumePath()
if targetPath == "" {
err = fmt.Errorf("targetpath %v is empty", targetPath)

return nil, status.Error(codes.InvalidArgument, err.Error())
}

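The node-server hunks all touch the same idempotency idiom: stat the path, return early on error, return early if the work is already done, then do the work. A condensed sketch of that guard-clause flow with nlreturn-style spacing — ensureMounted and its function parameters are hypothetical stand-ins for the mount plumbing above:

package example

import "log"

// ensureMounted mirrors the NodeStageVolume shape: fail fast on stat
// errors, skip when already mounted, and only then perform the mount.
func ensureMounted(path string, isMounted func(string) (bool, error), mount func(string) error) error {
	ok, err := isMounted(path)
	if err != nil {
		log.Printf("stat failed: %v", err)

		return err
	}

	if ok {
		log.Printf("%s is already mounted, skipping", path)

		return nil
	}

	return mount(path)
}
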
@ -52,6 +52,7 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}

@ -59,8 +60,10 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
if err != nil {
util.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err)

return err
}

return nil
}

@ -68,6 +71,7 @@ func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volum
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}

@ -75,8 +79,10 @@ func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volum
if err != nil {
util.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err)

return err
}

return nil
}

@ -92,6 +98,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return snap, err
}

@ -107,11 +114,13 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
string(snapID),
vo.FsName,
err)

return snap, err
}
snap.CreatedAt = info.CreatedAt.Time
snap.HasPendingClones = info.HasPendingClones
snap.Protected = info.Protected

return snap, nil
}

@ -124,6 +133,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}

@ -140,8 +150,10 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
string(snapID),
vo.FsName,
err)

return err
}

return nil
}

@ -154,6 +166,7 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}

@ -172,8 +185,10 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
string(snapID),
vo.FsName,
err)

return err
}

return nil
}

@ -185,6 +200,7 @@ func (vo *volumeOptions) cloneSnapshot(
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}
co := &admin.CloneOptions{
@ -207,7 +223,9 @@ func (vo *volumeOptions) cloneSnapshot(
if errors.Is(err, rados.ErrNotFound) {
return ErrVolumeNotFound
}

return err
}

return nil
}

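Where the mandated blank line would genuinely hurt readability, golangci-lint's standard inline suppression works for this linter as it does for any other. A hedged sketch — the conn type is hypothetical, and whether nolint comments are acceptable at all is a per-project policy question, not something this commit decides:

package example

import (
	"io"
	"sync"
)

type conn struct {
	mu sync.Mutex
	rc io.Closer
}

// close keeps the return tight against the lock/unlock pair and silences
// the report for this one line instead of disabling the linter globally.
func (c *conn) close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.rc.Close() //nolint:nlreturn // pairing with the defer reads better here
}
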
@ -34,6 +34,7 @@ type volumeID string

func execCommandErr(ctx context.Context, program string, args ...string) error {
_, _, err := util.ExecCommand(ctx, program, args...)

return err
}

@ -91,6 +92,7 @@ func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeReq
return status.Error(codes.InvalidArgument, "unsupported volume data source")
}
}

return nil
}

@ -129,11 +131,13 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
cephfsSnap.Monitors, cephfsSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return nil, err
}
if namePrefix, ok := snapOptions["snapshotNamePrefix"]; ok {
cephfsSnap.NamePrefix = namePrefix
}

return cephfsSnap, nil
}

@ -141,7 +145,9 @@ func parseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp,
tm, err := ptypes.TimestampProto(createTime)
if err != nil {
util.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)

return tm, err
}

return tm, nil
}

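The parseTime hunk uses the github.com/golang/protobuf/ptypes conversion that this era of the codebase depends on. A self-contained sketch of the same shape, assuming those import paths and wrapping the error instead of logging it (the function name and message are illustrative):

package example

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
)

// toProtoTime mirrors parseTime above: convert, bail out on failure, and
// keep the blank line before each return.
func toProtoTime(t time.Time) (*timestamp.Timestamp, error) {
	tm, err := ptypes.TimestampProto(t)
	if err != nil {
		return nil, fmt.Errorf("failed to convert time %s: %w", t, err)
	}

	return tm, nil
}
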
@ -60,6 +60,7 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin err %s", err)

return "", err
}
svPath, err := fsa.SubVolumePath(vo.FsName, vo.SubvolumeGroup, string(volID))
@ -68,8 +69,10 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
if errors.Is(err, rados.ErrNotFound) {
return "", util.JoinErrors(ErrVolumeNotFound, err)
}

return "", err
}

return svPath, nil
}

@ -77,6 +80,7 @@ func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s: %s", vo.FsName, err)

return nil, err
}

@ -147,6 +151,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
ca, err := volOptions.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)

return err
}

@ -161,6 +166,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
volOptions.SubvolumeGroup,
string(volID),
err)

return err
}
util.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
@ -179,6 +185,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
err = ca.CreateSubVolume(volOptions.FsName, volOptions.SubvolumeGroup, string(volID), &opts)
if err != nil {
util.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)

return err
}

@ -203,21 +210,25 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s: %s", vo.FsName, err)

return err
}
_, err = fsa.ResizeSubVolume(vo.FsName, vo.SubvolumeGroup, string(volID), fsAdmin.ByteCount(bytesQuota), true)
if err == nil {
clusterAdditionalInfo[vo.ClusterID].resizeState = supported

return nil
}
var invalid fsAdmin.NotImplementedError
// In case the error is other than invalid command return error to the caller.
if !errors.As(err, &invalid) {
util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)

return err
}
}
clusterAdditionalInfo[vo.ClusterID].resizeState = unsupported

return createVolume(ctx, vo, volID, bytesQuota)
}

@ -225,6 +236,7 @@ func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)

return err
}

@ -244,6 +256,7 @@ func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force
if errors.Is(err, rados.ErrNotFound) {
return util.JoinErrors(ErrVolumeNotFound, err)
}

return err
}

@ -260,5 +273,6 @@ func checkSubvolumeHasFeature(feature string, subVolFeatures []string) bool {
return true
}
}

return false
}

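resizeVolume above is a nice capability-detection pattern: try the native resize, and only fall back to the create path when errors.As identifies a not-implemented error from the backend. A generic sketch of that flow under stated assumptions — notImplementedError stands in for go-ceph's fsAdmin.NotImplementedError, and the two function parameters stand in for the resize and create calls:

package example

import "errors"

type notImplementedError struct{ msg string }

func (e notImplementedError) Error() string { return e.msg }

// resize tries the primary operation and falls back only when the backend
// reports the command is unsupported; any other error is returned as-is.
func resize(do func() error, fallback func() error) error {
	err := do()
	if err == nil {
		return nil
	}

	var ni notImplementedError
	if !errors.As(err, &ni) {
		return err
	}

	return fallback()
}
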
@ -123,6 +123,7 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
for _, availMounter := range availableMounters {
if availMounter == wantMounter {
chosenMounter = wantMounter

break
}
}
@ -235,6 +236,7 @@ func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, v
if err != nil {
return fmt.Errorf("%w stderr: %s", err, stderr)
}

return err
}

@ -275,6 +277,7 @@ func unmountVolume(ctx context.Context, mountPoint string) error {
strings.Contains(err.Error(), "No such file or directory") {
return nil
}

return err
}

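The newMounter hunk shows that nlreturn covers branch statements as well: a break or continue that follows other statements in its block gets the same preceding blank line as a return. A minimal sketch (pick and its arguments are illustrative):

package example

// pick mirrors the mounter-selection loop above: the assignment before
// break forces the separating blank line, while the final return also
// needs one because the for block precedes it.
func pick(available []string, want string) string {
	chosen := ""
	for _, m := range available {
		if m == want {
			chosen = m

			break
		}
	}

	return chosen
}
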
@ -67,6 +67,7 @@ func (vo *volumeOptions) Connect(cr *util.Credentials) error {
}

vo.conn = conn

return nil
}

@ -98,6 +99,7 @@ func extractOptionalOption(dest *string, optionLabel string, options map[string]
}

*dest = opt

return nil
}

@ -112,6 +114,7 @@ func extractOption(dest *string, optionLabel string, options map[string]string)
}

*dest = opt

return nil
}

@ -144,6 +147,7 @@ func getClusterInformation(options map[string]string) (*util.ClusterInfo, error)
clusterID, ok := options["clusterID"]
if !ok {
err := fmt.Errorf("clusterID must be set")

return nil, err
}

@ -154,12 +158,14 @@ func getClusterInformation(options map[string]string) (*util.ClusterInfo, error)
monitors, err := util.Mons(util.CsiConfigFile, clusterID)
if err != nil {
err = fmt.Errorf("failed to fetch monitor list using clusterID (%s): %w", clusterID, err)

return nil, err
}

subvolumeGroup, err := util.CephFSSubvolumeGroup(util.CsiConfigFile, clusterID)
if err != nil {
err = fmt.Errorf("failed to fetch subvolumegroup using clusterID (%s): %w", clusterID, err)

return nil, err
}
clusterData := &util.ClusterInfo{
@ -167,6 +173,7 @@ func getClusterInformation(options map[string]string) (*util.ClusterInfo, error)
Monitors: strings.Split(monitors, ","),
}
clusterData.CephFS.SubvolumeGroup = subvolumeGroup

return clusterData, nil
}

@ -265,6 +272,7 @@ func newVolumeOptionsFromVolID(
err := vi.DecomposeCSIID(volID)
if err != nil {
err = fmt.Errorf("error decoding volume ID (%s): %w", volID, err)

return nil, nil, util.JoinErrors(ErrInvalidVolID, err)
}
volOptions.ClusterID = vi.ClusterID

@ -49,6 +49,7 @@ func addToManager(mgr manager.Manager, config Config) error {
return fmt.Errorf("failed to add: %w", err)
}
}

return nil
}

@ -65,16 +66,19 @@ func Start(config Config) error {
mgr, err := manager.New(clientConfig.GetConfigOrDie(), opts)
if err != nil {
util.ErrorLogMsg("failed to create manager %s", err)

return err
}
err = addToManager(mgr, config)
if err != nil {
util.ErrorLogMsg("failed to add manager %s", err)

return err
}
err = mgr.Start(signals.SetupSignalHandler())
if err != nil {
util.ErrorLogMsg("failed to start manager %s", err)
}

return err
}

@ -67,6 +67,7 @@ func newPVReconciler(mgr manager.Manager, config ctrl.Config) reconcile.Reconcil
config: config,
Locks: util.NewVolumeLocks(),
}

return r
}

@ -85,6 +86,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
if err != nil {
return fmt.Errorf("failed to watch the changes: %w", err)
}

return nil
}

@ -97,6 +99,7 @@ func (r *ReconcilePersistentVolume) getCredentials(
if name == "" || namespace == "" {
errStr := "secret name or secret namespace is empty"
util.ErrorLogMsg(errStr)

return nil, errors.New(errStr)
}
secret := &corev1.Secret{}
@ -115,8 +118,10 @@ func (r *ReconcilePersistentVolume) getCredentials(
cr, err = util.NewUserCredentials(credentials)
if err != nil {
util.ErrorLogMsg("failed to get user credentials %s", err)

return nil, err
}

return cr, nil
}

@ -131,6 +136,7 @@ func checkStaticVolume(pv *corev1.PersistentVolume) (bool, error) {
return false, fmt.Errorf("failed to parse preProvisionedVolume: %w", err)
}
}

return static, nil
}

@ -152,6 +158,7 @@ func (r ReconcilePersistentVolume) storeVolumeIDInPV(
}
pv.Labels[rbd.PVReplicatedLabelKey] = rbd.PVReplicatedLabelValue
pv.Annotations[rbd.PVVolumeHandleAnnotationKey] = newVolumeID

return r.client.Update(ctx, pv)
}

@ -198,6 +205,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
cr, err := r.getCredentials(ctx, secretName, secretNamespace)
if err != nil {
util.ErrorLogMsg("failed to get credentials from secret %s", err)

return err
}
defer cr.DeleteCredentials()
@ -205,15 +213,18 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
rbdVolID, err := rbd.RegenerateJournal(imageName, volumeHandler, pool, journalPool, requestName, cr)
if err != nil {
util.ErrorLogMsg("failed to regenerate journal %s", err)

return err
}
if rbdVolID != volumeHandler {
err = r.storeVolumeIDInPV(ctx, pv, rbdVolID)
if err != nil {
util.ErrorLogMsg("failed to store volumeID in PV %s", err)

return err
}
}

return nil
}

@ -227,6 +238,7 @@ func (r *ReconcilePersistentVolume) Reconcile(ctx context.Context,
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}

return reconcile.Result{}, err
}
// Check if the object is under deletion
@ -238,5 +250,6 @@ func (r *ReconcilePersistentVolume) Reconcile(ctx context.Context,
if err != nil {
return reconcile.Result{}, err
}

return reconcile.Result{}, nil
}

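The Reconcile hunk shows the idiomatic controller shape: a not-found object ends the reconcile successfully, any other fetch error is surfaced, and success falls through at the end. A dependency-free sketch of the same control flow — result stands in for controller-runtime's reconcile.Result, and fetch/errNotFound are illustrative:

package example

import "errors"

type result struct{ requeue bool }

var errNotFound = errors.New("not found")

// reconcileOnce mirrors Reconcile above: gone objects are not an error,
// fetch failures are, and every exit path gets nlreturn spacing.
func reconcileOnce(fetch func() error) (result, error) {
	if err := fetch(); err != nil {
		if errors.Is(err, errNotFound) {
			return result{}, nil
		}

		return result{}, err
	}

	return result{}, nil
}
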
@ -75,6 +75,7 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities(
if cs.Driver == nil {
return nil, status.Error(codes.Unimplemented, "Controller server is not enabled")
}

return &csi.ControllerGetCapabilitiesResponse{
Capabilities: cs.Driver.capabilities,
}, nil

@ -44,16 +44,19 @@ type CSIDriver struct {
func NewCSIDriver(name, v, nodeID string) *CSIDriver {
if name == "" {
klog.Errorf("Driver name missing")

return nil
}

if nodeID == "" {
klog.Errorf("NodeID missing")

return nil
}
// TODO version format and validation
if v == "" {
klog.Errorf("Version argument missing")

return nil
}

@ -78,6 +81,7 @@ func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapa
return nil
}
}

return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c)) //nolint
}

@ -103,6 +107,7 @@ func (d *CSIDriver) AddVolumeCapabilityAccessModes(
vca = append(vca, NewVolumeCapabilityAccessMode(c))
}
d.vc = vca

return vca
}

@ -61,6 +61,7 @@ func (ids *DefaultIdentityServer) GetPluginCapabilities(
ctx context.Context,
req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
util.TraceLog(ctx, "Using default capabilities")

return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{

@ -104,6 +104,7 @@ func ConstructMountOptions(mountOptions []string, volCap *csi.VolumeCapability)
return true
}
}

return false
}
for _, f := range m.MountFlags {
@ -112,6 +113,7 @@ func ConstructMountOptions(mountOptions []string, volCap *csi.VolumeCapability)
}
}
}

return mountOptions
}

@ -122,5 +124,6 @@ func MountOptionContains(mountOptions []string, opt string) bool {
return true
}
}

return false
}

@ -44,12 +44,14 @@ func parseEndpoint(ep string) (string, string, error) {
return s[0], s[1], nil
}
}

return "", "", fmt.Errorf("invalid endpoint: %v", ep)
}

@ -55,6 +56,7 @@ func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *c
// NewDefaultNodeServer initializes default node server.
func NewDefaultNodeServer(d *CSIDriver, t string, topology map[string]string) *DefaultNodeServer {
d.topology = topology

return &DefaultNodeServer{
Driver: d,
Type: t,
@ -98,6 +100,7 @@ func isReplicationRequest(req interface{}) bool {
default:
isReplicationRequest = false
}

return isReplicationRequest
}

@ -145,6 +148,7 @@ func getReqID(req interface{}) string {
case *replication.ResyncVolumeRequest:
reqID = r.VolumeId
}

return reqID
}

@ -160,6 +164,7 @@ func contextIDInjector(
if reqID := getReqID(req); reqID != "" {
ctx = context.WithValue(ctx, util.ReqID, reqID)
}

return handler(ctx, req)
}

@ -181,6 +186,7 @@ func logGRPC(
} else {
util.TraceLog(ctx, "GRPC response: %s", protosanitizer.StripSecrets(resp))
}

return resp, err
}

@ -196,6 +202,7 @@ func panicHandler(
err = status.Errorf(codes.Internal, "panic %v", r)
}
}()

return handler(ctx, req)
}

@ -209,6 +216,7 @@ func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.
if os.IsNotExist(err) {
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
}

return nil, status.Error(codes.Internal, err.Error())
}
if !isMnt {
@ -228,6 +236,7 @@ func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.
capacity, ok := (*(volMetrics.Capacity)).AsInt64()
if !ok {
util.ErrorLog(ctx, "failed to fetch capacity bytes")

return nil, status.Error(codes.Unknown, "failed to fetch capacity bytes")
}
used, ok := (*(volMetrics.Used)).AsInt64()
@ -237,6 +246,7 @@ func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.
inodes, ok := (*(volMetrics.Inodes)).AsInt64()
if !ok {
util.ErrorLog(ctx, "failed to fetch available inodes")

return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
}
inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
@ -248,6 +258,7 @@ func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.
if !ok {
util.ErrorLog(ctx, "failed to fetch used inodes")
}

return &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
{

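contextIDInjector, logGRPC and panicHandler above are all gRPC unary server interceptors, where the final statement is almost always `return handler(ctx, req)` preceded by setup work. A self-contained sketch of that shape using the standard grpc-go interceptor signature (the logging content is illustrative):

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingInterceptor has the same layout as the interceptors above: do the
// side work first, then hand off to the real handler after a blank line.
func loggingInterceptor() grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context,
		req interface{},
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler,
	) (interface{}, error) {
		log.Printf("GRPC call: %s", info.FullMethod)

		return handler(ctx, req)
	}
}
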
@ -76,13 +76,16 @@ func getOMapValues(
if errors.Is(err, rados.ErrNotFound) {
util.ErrorLog(ctx, "omap not found (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)

return nil, util.JoinErrors(util.ErrKeyNotFound, err)
}

return nil, err
}

util.DebugLog(ctx, "got omap values: (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, results)

return results, nil
}

@ -112,11 +115,13 @@ func removeMapKeys(
} else {
util.ErrorLog(ctx, "failed removing omap keys (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)

return err
}
}
util.DebugLog(ctx, "removed omap keys (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, keys)

return nil
}

@ -143,10 +148,12 @@ func setOMapKeys(
if err != nil {
util.ErrorLog(ctx, "failed setting omap keys (pool=%q, namespace=%q, name=%q, pairs=%+v): %v",
poolName, namespace, oid, pairs, err)

return err
}
util.DebugLog(ctx, "set omap keys (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, pairs)

return nil
}

@ -154,5 +161,6 @@ func omapPoolError(err error) error {
if errors.Is(err, rados.ErrNotFound) {
return util.JoinErrors(util.ErrPoolNotFound, err)
}

return err
}

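omapPoolError is a small sentinel-translation helper: a backend not-found error is mapped onto the package's own sentinel so callers can match it with errors.Is. A dependency-free sketch under stated assumptions — the two sentinels stand in for rados.ErrNotFound and util.ErrPoolNotFound, and %w keeps the package sentinel matchable in the chain (util.JoinErrors in the real code keeps both):

package example

import (
	"errors"
	"fmt"
)

var (
	errBackendNotFound = errors.New("rados: not found") // stand-in for rados.ErrNotFound
	errPoolNotFound    = errors.New("pool not found")   // stand-in for util.ErrPoolNotFound
)

// poolError translates the backend sentinel into the package-level one,
// embedding the original error text for context.
func poolError(err error) error {
	if errors.Is(err, errBackendNotFound) {
		return fmt.Errorf("%w: %v", errPoolNotFound, err)
	}

	return err
}
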
@ -196,6 +196,7 @@ func NewCSISnapshotJournal(suffix string) *Config {
func NewCSIVolumeJournalWithNamespace(suffix, ns string) *Config {
j := NewCSIVolumeJournal(suffix)
j.namespace = ns

return j
}

@ -204,6 +205,7 @@ func NewCSIVolumeJournalWithNamespace(suffix, ns string) *Config {
func NewCSISnapshotJournalWithNamespace(suffix, ns string) *Config {
j := NewCSISnapshotJournal(suffix)
j.namespace = ns

return j
}

@ -216,6 +218,7 @@ func (cj *Config) GetNameForUUID(prefix, uid string, isSnapshot bool) string {
prefix = defaultVolumeNamingPrefix
}
}

return prefix + uid
}

@ -251,6 +254,7 @@ func (cj *Config) Connect(monitors, namespace string, cr *util.Credentials) (*Co
cr: cr,
conn: cc,
}

return conn, nil
}

@ -282,6 +286,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
if parentName != "" {
if cj.cephSnapSourceKey == "" {
err := errors.New("invalid request, cephSnapSourceKey is nil")

return nil, err
}
snapSource = true
@ -300,6 +305,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
// stop processing but without an error for no reservation exists
return nil, nil
}

return nil, err
}
objUUIDAndPool, found := values[cj.csiNameKeyPrefix+reqName]
@ -330,6 +336,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
if errors.Is(err, util.ErrPoolNotFound) {
err = conn.UndoReservation(ctx, journalPool, "", "", reqName)
}

return nil, err
}
}
@ -343,6 +350,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
err = conn.UndoReservation(ctx, journalPool, savedImagePool,
cj.GetNameForUUID(namePrefix, objUUID, snapSource), reqName)
}

return nil, err
}

@ -375,6 +383,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
err = fmt.Errorf("%w: snapname points to different volume, request name (%s)"+
" source name (%s) saved source name (%s)", util.ErrSnapNameConflict,
reqName, parentName, savedImageAttributes.SourceName)

return nil, err
}
}
@ -429,6 +438,7 @@ func (conn *Connection) UndoReservation(ctx context.Context,
if err != nil {
if !errors.Is(err, util.ErrObjectNotFound) {
util.ErrorLog(ctx, "failed removing oMap %s (%s)", cj.cephUUIDDirectoryPrefix+imageUUID, err)

return err
}
}
@ -439,6 +449,7 @@ func (conn *Connection) UndoReservation(ctx context.Context,
[]string{cj.csiNameKeyPrefix + reqName})
if err != nil {
util.ErrorLog(ctx, "failed removing oMap key %s (%s)", cj.csiNameKeyPrefix+reqName, err)

return err
}

@ -477,6 +488,7 @@ func reserveOMapName(
// try again with a different uuid, for maxAttempts tries
util.DebugLog(ctx, "uuid (%s) conflict detected, retrying (attempt %d of %d)",
iterUUID, attempt, maxAttempts)

continue
}

@ -534,6 +546,7 @@ func (conn *Connection) ReserveName(ctx context.Context,
if parentName != "" {
if cj.cephSnapSourceKey == "" {
err = errors.New("invalid request, cephSnapSourceKey is nil")

return "", "", err
}
snapSource = true
@ -623,6 +636,7 @@ func (conn *Connection) ReserveName(ctx context.Context,
if err != nil {
return "", "", err
}

return volUUID, imageName, nil
}

@ -650,6 +664,7 @@ func (conn *Connection) GetImageAttributes(

if snapSource && cj.cephSnapSourceKey == "" {
err = errors.New("invalid request, cephSnapSourceKey is nil")

return nil, err
}

@ -720,6 +735,7 @@ func (conn *Connection) StoreImageID(ctx context.Context, pool, reservedUUID, im
if err != nil {
return err
}

return nil
}

@ -749,8 +765,10 @@ func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
// stop processing but without an error for no reservation exists
return "", nil
}

return "", err
}

return values[cj.csiNameKeyPrefix+volumeHandle], nil
}

@ -767,5 +785,6 @@ func (conn *Connection) ReserveNewUUIDMapping(ctx context.Context,
setKeys := map[string]string{
cj.csiNameKeyPrefix + oldVolumeHandle: newVolumeHandle,
}

return setOMapKeys(ctx, conn, journalPool, cj.namespace, cj.csiDirectory, setKeys)
}

@ -44,12 +44,14 @@ func getLiveness(timeout time.Duration, csiConn *grpc.ClientConn) {
if err != nil {
liveness.Set(0)
util.ErrorLogMsg("health check failed: %v", err)

return
}

if !ready {
liveness.Set(0)
util.ErrorLogMsg("driver responded but is not ready")

return
}
liveness.Set(1)

@ -61,12 +61,14 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
if errors.Is(err, ErrSnapNotFound) {
return true, nil
}

return false, err
}
err = tempClone.deleteSnapshot(ctx, snap)
if err == nil {
return true, nil
}

return false, err
}
if !errors.Is(err, ErrImageNotFound) {
@ -90,6 +92,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
if err != nil {
return false, err
}

return true, nil
case !errors.Is(err, ErrImageNotFound):
// any error other than image not found return error
@ -104,13 +107,16 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
if err != nil {
util.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rv.RbdImageName, snap.RbdSnapName, err)
err = fmt.Errorf("failed to clone rbd image %s from snapshot %s: %w", rv.RbdImageName, snap.RbdSnapName, err)

return false, err
}
err = tempClone.deleteSnapshot(ctx, snap)
if err != nil {
util.ErrorLog(ctx, "failed to delete snapshot: %v", err)

return false, err
}

return true, nil
}
// as the temp clone does not exist, check snapshot exists on parent volume
@ -125,6 +131,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
if errors.Is(err, ErrSnapNotFound) {
return false, nil
}

return false, err
}

@ -142,6 +149,7 @@ func (rv *rbdVolume) generateTempClone() *rbdVolume {
// this name will be always unique, as cephcsi never creates an image with
// this format for new rbd images
tempClone.RbdImageName = rv.RbdImageName + "-temp"

return &tempClone
}

@ -160,6 +168,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
err = rv.getImageID()
if err != nil {
util.ErrorLog(ctx, "failed to get volume id %s: %v", rv, err)

return err
}

@ -180,8 +189,10 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
if err != nil {
util.ErrorLog(ctx, "failed to store volume %s: %v", rv, err)

return err
}

return nil
}

@ -248,6 +259,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
if errClone != nil {
// set errFlatten error to cleanup temporary snapshot and temporary clone
errFlatten = errors.New("failed to create user requested cloned image")

return errClone
}
}
@ -285,5 +297,6 @@ func (rv *rbdVolume) flattenCloneImage(ctx context.Context) error {
if !errors.Is(err, ErrImageNotFound) {
return err
}

return rv.flattenRbdImage(ctx, rv.conn.Creds, false, hardLimit, softLimit)
}

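doSnapClone above uses a deferred cleanup keyed on an error variable: setting errFlatten before returning makes the defer tear down the temporary snapshot and clone. A generic sketch of that defer-on-error pattern, assuming hypothetical create/cleanup callbacks in place of the rbd operations:

package example

// createWithCleanup mirrors the doSnapClone idea: the named return value
// err is inspected in the deferred block, so any error exit triggers a
// best-effort teardown of whatever create left behind.
func createWithCleanup(create, cleanup func() error) (err error) {
	defer func() {
		if err != nil {
			_ = cleanup()
		}
	}()

	if err = create(); err != nil {
		return err
	}

	return nil
}

The named return is what makes this work: by the time the defer runs, err already holds the value the function is about to return.
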
@ -55,6 +55,7 @@ func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.Crea
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
util.ErrorLog(ctx, "invalid create volume req: %v", protosanitizer.StripSecrets(req))

return err
}
// Check sanity of request Name, Volume Capabilities
@ -175,6 +176,7 @@ func buildCreateVolumeResponse(req *csi.CreateVolumeRequest, rbdVol *rbdVolume)
},
}
}

return &csi.CreateVolumeResponse{Volume: volume}
}

@ -188,6 +190,7 @@ func getGRPCErrorForCreateVolume(err error) error {
if errors.Is(err, ErrFlattenInProgress) {
return status.Error(codes.Aborted, err.Error())
}

return status.Error(codes.Internal, err.Error())
}

@ -223,6 +226,7 @@ func validateRequestedVolumeSize(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnaps
parentVol.VolSize)
}
}

return nil
}

@ -254,6 +258,7 @@ func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSna
return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
}
}

return nil
}

@ -281,6 +286,7 @@ func (cs *ControllerServer) CreateVolume(
// Existence and conflict checks
if acquired := cs.VolumeLocks.TryAcquire(req.GetName()); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName())

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
}
defer cs.VolumeLocks.Release(req.GetName())
@ -288,6 +294,7 @@ func (cs *ControllerServer) CreateVolume(
err = rbdVol.Connect(cr)
if err != nil {
util.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -333,6 +340,7 @@ func (cs *ControllerServer) CreateVolume(
if errors.Is(err, ErrFlattenInProgress) {
return nil, status.Error(codes.Aborted, err.Error())
}

return nil, err
}

@ -356,6 +364,7 @@ func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
return err
}
}

return nil
}

@ -402,6 +411,7 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
if err != nil {
return nil, status.Errorf(codes.Aborted, "failed to remove volume %q from journal: %s", rbdVol, err)
}

return nil, status.Errorf(
codes.Aborted,
"restoring thick-provisioned volume %q has been interrupted, please retry", rbdVol)
@ -443,6 +453,7 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to remove volume %q from journal: %s", rbdVol, err)
}

return nil, status.Errorf(
codes.Internal,
"cloning thick-provisioned volume %q has been interrupted, please retry", rbdVol)
@ -470,6 +481,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
if errors.Is(err, ErrImageNotFound) {
return status.Error(codes.InvalidArgument, err.Error())
}

return status.Error(codes.Internal, err.Error())
}

@ -490,6 +502,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
if err != nil {
return status.Error(codes.Internal, err.Error())
}

return status.Errorf(codes.ResourceExhausted, "rbd image %s has %d snapshots", rbdVol, len(snaps))
}

@ -515,6 +528,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
return status.Error(codes.Internal, err.Error())
}
}

return nil
}

@ -530,14 +544,17 @@ func checkFlatten(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials)
}
if errDefer := deleteImage(ctx, rbdVol, cr); errDefer != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, errDefer)

return status.Error(codes.Internal, err.Error())
}
errDefer := undoVolReservation(ctx, rbdVol, cr)
if errDefer != nil {
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", rbdVol.RequestName, errDefer)
}

return status.Error(codes.Internal, err.Error())
}

return nil
}

@ -550,6 +567,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
rbdSnap := &rbdSnapshot{}
if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

return status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
}
defer cs.SnapshotLocks.Release(snapshotID)
@ -558,8 +576,10 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
util.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

return status.Error(codes.InvalidArgument, err.Error())
}

return status.Error(codes.Internal, err.Error())
}

@ -583,11 +603,13 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol)
if err != nil {
util.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rbdVol, rbdSnap, err)

return err
}
}

util.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)

return nil
}

@ -609,6 +631,7 @@ func (cs *ControllerServer) createBackingImage(
case rbdSnap != nil:
if err = cs.OperationLocks.GetRestoreLock(rbdSnap.VolID); err != nil {
util.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseRestoreLock(rbdSnap.VolID)
@ -621,14 +644,17 @@ func (cs *ControllerServer) createBackingImage(
case parentVol != nil:
if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil {
util.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseCloneLock(parentVol.VolID)

return rbdVol.createCloneFromImage(ctx, parentVol)
default:
err = createImage(ctx, rbdVol, cr)
if err != nil {
util.ErrorLog(ctx, "failed to create volume: %v", err)

return status.Error(codes.Internal, err.Error())
}
}
@ -653,9 +679,11 @@ func (cs *ControllerServer) createBackingImage(
err = rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
util.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)

return err
}
}

return nil
}

@ -683,8 +711,10 @@ func checkContentSource(
if !errors.Is(err, ErrSnapNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error())
}

return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
}

return nil, rbdSnap, nil
case *csi.VolumeContentSource_Volume:
vol := req.VolumeContentSource.GetVolume()
@ -701,10 +731,13 @@ func checkContentSource(
if !errors.Is(err, ErrImageNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error())
}

return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
}

return rbdvol, nil, nil
}

return nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source")
}

@ -717,6 +750,7 @@ func (cs *ControllerServer) DeleteVolume(
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
util.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))

return nil, err
}

@ -734,6 +768,7 @@ func (cs *ControllerServer) DeleteVolume(

if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer cs.VolumeLocks.Release(volumeID)
@ -741,6 +776,7 @@ func (cs *ControllerServer) DeleteVolume(
// lock out volumeID for clone and expand operation
if err = cs.OperationLocks.GetDeleteLock(volumeID); err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseDeleteLock(volumeID)
@ -750,6 +786,7 @@ func (cs *ControllerServer) DeleteVolume(
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
util.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err)

return &csi.DeleteVolumeResponse{}, nil
}

@ -758,6 +795,7 @@ func (cs *ControllerServer) DeleteVolume(
// success as deletion is complete
if errors.Is(err, util.ErrKeyNotFound) {
util.WarningLog(ctx, "failed to get volume options for %s: %v", volumeID, err)

return &csi.DeleteVolumeResponse{}, nil
}

@ -771,6 +809,7 @@ func (cs *ControllerServer) DeleteVolume(
// unreserve for the same
if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
}
defer cs.VolumeLocks.Release(rbdVol.RequestName)
@ -778,6 +817,7 @@ func (cs *ControllerServer) DeleteVolume(
if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.DeleteVolumeResponse{}, nil
}

@ -785,6 +825,7 @@ func (cs *ControllerServer) DeleteVolume(
// cleanup the image and associated omaps for the same
if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
}
defer cs.VolumeLocks.Release(rbdVol.RequestName)
@ -792,10 +833,12 @@ func (cs *ControllerServer) DeleteVolume(
inUse, err := rbdVol.isInUse()
if err != nil {
util.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err)

return nil, status.Error(codes.Internal, err.Error())
}
if inUse {
util.ErrorLog(ctx, "rbd %s is still being used", rbdVol)

return nil, status.Errorf(codes.Internal, "rbd %s is still being used", rbdVol.RbdImageName)
}

@ -808,6 +851,7 @@ func (cs *ControllerServer) DeleteVolume(
if !errors.Is(err, ErrImageNotFound) {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
tempClone, err)

return nil, status.Error(codes.Internal, err.Error())
}
}
@ -817,12 +861,14 @@ func (cs *ControllerServer) DeleteVolume(
if err = deleteImage(ctx, rbdVol, cr); err != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
rbdVol, err)

return nil, status.Error(codes.Internal, err.Error())
}

if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
util.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
rbdVol.RequestName, rbdVol.RbdImageName, err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -847,6 +893,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
}

return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: req.VolumeCapabilities,
@ -881,6 +928,7 @@ func (cs *ControllerServer) CreateSnapshot(
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}

@ -903,6 +951,7 @@ func (cs *ControllerServer) CreateSnapshot(

if acquired := cs.SnapshotLocks.TryAcquire(req.GetName()); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, req.GetName())

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
}
defer cs.SnapshotLocks.Release(req.GetName())
@ -910,6 +959,7 @@ func (cs *ControllerServer) CreateSnapshot(
// Take lock on parent rbd image
if err = cs.OperationLocks.GetSnapshotCreateLock(rbdSnap.SourceVolumeID); err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseSnapshotCreateLock(rbdSnap.SourceVolumeID)
@ -921,6 +971,7 @@ func (cs *ControllerServer) CreateSnapshot(
if errors.Is(err, util.ErrSnapNameConflict) {
return nil, status.Error(codes.AlreadyExists, err.Error())
}

return nil, status.Errorf(codes.Internal, err.Error())
}
if found {
@ -977,6 +1028,7 @@ func cloneFromSnapshot(
if uErr != nil {
util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
}

return nil, status.Errorf(codes.Internal, err.Error())
}
defer vol.Destroy()
@ -1025,6 +1077,7 @@ func cloneFromSnapshot(
if uErr != nil {
util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
}

return nil, status.Errorf(codes.Internal, err.Error())
}

@ -1043,6 +1096,7 @@ func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.Cr
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
util.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))

return err
}

@ -1061,6 +1115,7 @@ func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.Cr
if value, ok := options["pool"]; ok && value == "" {
return status.Error(codes.InvalidArgument, "empty pool name in which rbd image will be created")
}

return nil
}

@ -1085,6 +1140,7 @@ func (cs *ControllerServer) doSnapshotClone(
err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap, cr)
if err != nil {
util.ErrorLog(ctx, "failed to create snapshot: %v", err)

return ready, cloneRbd, status.Error(codes.Internal, err.Error())
}

@ -1105,6 +1161,7 @@ func (cs *ControllerServer) doSnapshotClone(
if cryptErr != nil {
util.WarningLog(ctx, "failed copy encryption "+
"config for %q: %v", cloneRbd, cryptErr)

return ready, nil, status.Errorf(codes.Internal,
err.Error())
}
@ -1131,6 +1188,7 @@ func (cs *ControllerServer) doSnapshotClone(
// update rbd image name for logging
rbdSnap.RbdImageName = cloneRbd.RbdImageName
util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, err)

return ready, cloneRbd, err
}
}
@ -1138,12 +1196,14 @@ func (cs *ControllerServer) doSnapshotClone(
err = cloneRbd.getImageID()
if err != nil {
util.ErrorLog(ctx, "failed to get image id: %v", err)

return ready, cloneRbd, err
}
// save image ID
j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
if err != nil {
util.ErrorLog(ctx, "failed to connect to cluster: %v", err)

return ready, cloneRbd, err
}
defer j.Destroy()
@ -1151,6 +1211,7 @@ func (cs *ControllerServer) doSnapshotClone(
err = j.StoreImageID(ctx, rbdSnap.JournalPool, rbdSnap.ReservedID, cloneRbd.ImageID)
if err != nil {
util.ErrorLog(ctx, "failed to reserve volume id: %v", err)

return ready, cloneRbd, err
}

@ -1159,9 +1220,11 @@ func (cs *ControllerServer) doSnapshotClone(
if errors.Is(err, ErrFlattenInProgress) {
return ready, cloneRbd, nil
}

return ready, cloneRbd, err
}
ready = true

return ready, cloneRbd, nil
}

@ -1173,6 +1236,7 @@ func (cs *ControllerServer) DeleteSnapshot(
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
util.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))

return nil, err
}

@ -1189,6 +1253,7 @@ func (cs *ControllerServer) DeleteSnapshot(

if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
}
defer cs.SnapshotLocks.Release(snapshotID)
@ -1196,6 +1261,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// lock out snapshotID for restore operation
if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
@ -1206,6 +1272,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// need to worry about deleting snapshot or omap data, return success
if errors.Is(err, util.ErrPoolNotFound) {
util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

return &csi.DeleteSnapshotResponse{}, nil
}

@ -1223,6 +1290,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// name
if acquired := cs.SnapshotLocks.TryAcquire(rbdSnap.RequestName); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, rbdSnap.RequestName)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdSnap.RequestName)
}
defer cs.SnapshotLocks.Release(rbdSnap.RequestName)
@ -1242,6 +1310,7 @@ func (cs *ControllerServer) DeleteSnapshot(
if err != nil {
if !errors.Is(err, ErrImageNotFound) {
util.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
} else {
|
||||
@ -1251,6 +1320,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete image: %v", err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
@ -1258,6 +1328,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
|
||||
rbdSnap.RequestName, rbdSnap.RbdSnapName, rbdSnap.RbdImageName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
@ -1270,6 +1341,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {
|
||||
util.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -1292,6 +1364,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
// lock out parallel requests against the same volume ID
|
||||
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
}
|
||||
defer cs.VolumeLocks.Release(volID)
|
||||
@ -1299,6 +1372,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
// lock out volumeID for clone and delete operation
|
||||
if err := cs.OperationLocks.GetExpandLock(volID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
defer cs.OperationLocks.ReleaseExpandLock(volID)
|
||||
@ -1321,6 +1395,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
default:
|
||||
err = status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -1338,6 +1413,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
err = rbdVol.resize(volSize)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to resize rbd image: %s with error: %v", rbdVol, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
|
@ -91,6 +91,7 @@ func NewReplicationServer(c *ControllerServer) *ReplicationServer {
// NewNodeServer initialize a node server for rbd CSI driver.
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) (*NodeServer, error) {
mounter := mount.New("")

return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
mounter: mounter,

@ -65,14 +65,17 @@ func (ri *rbdImage) checkRbdImageEncrypted(ctx context.Context) (rbdEncryptionSt
value, err := ri.GetMetadata(encryptionMetaKey)
if errors.Is(err, librbd.ErrNotFound) {
util.DebugLog(ctx, "image %s encrypted state not set", ri)

return rbdImageEncryptionUnknown, nil
} else if err != nil {
util.ErrorLog(ctx, "checking image %s encrypted state metadata failed: %s", ri, err)

return rbdImageEncryptionUnknown, err
}

encrypted := rbdEncryptionState(strings.TrimSpace(value))
util.DebugLog(ctx, "image %s encrypted state metadata reports %q", ri, encrypted)

return encrypted, nil
}

@ -98,6 +101,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error {
if err != nil {
util.ErrorLog(ctx, "failed to save encryption passphrase for "+
"image %s: %s", ri, err)

return err
}

@ -105,6 +109,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error {
if err != nil {
util.ErrorLog(ctx, "failed to save encryption status, deleting "+
"image %s: %s", ri, err)

return err
}

@ -181,18 +186,21 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error
if err != nil {
util.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v",
ri, err)

return err
}

if err = util.EncryptVolume(ctx, devicePath, passphrase); err != nil {
err = fmt.Errorf("failed to encrypt volume %s: %w", ri, err)
util.ErrorLog(ctx, err.Error())

return err
}

err = ri.ensureEncryptionMetadataSet(rbdImageEncrypted)
if err != nil {
util.ErrorLog(ctx, err.Error())

return err
}

@ -204,6 +212,7 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string)
if err != nil {
util.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v",
rv, err)

return "", err
}

@ -212,6 +221,7 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string)
isOpen, err := util.IsDeviceOpen(ctx, mapperFilePath)
if err != nil {
util.ErrorLog(ctx, "failed to check device %s encryption status: %s", devicePath, err)

return devicePath, err
}
if isOpen {

@ -221,6 +231,7 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string)
if err != nil {
util.ErrorLog(ctx, "failed to open device %s: %v",
rv, err)

return devicePath, err
}
}

@ -38,6 +38,7 @@ func (ri *rbdImage) enableImageMirroring(mode librbd.ImageMirrorMode) error {
if err != nil {
return fmt.Errorf("failed to enable mirroring on %q with error: %w", ri, err)
}

return nil
}

@ -53,6 +54,7 @@ func (ri *rbdImage) disableImageMirroring(force bool) error {
if err != nil {
return fmt.Errorf("failed to disable mirroring on %q with error: %w", ri, err)
}

return nil
}

@ -68,6 +70,7 @@ func (ri *rbdImage) getImageMirroringInfo() (*librbd.MirrorImageInfo, error) {
if err != nil {
return nil, fmt.Errorf("failed to get mirroring info of %q with error: %w", ri, err)
}

return info, nil
}

@ -82,6 +85,7 @@ func (ri *rbdImage) promoteImage(force bool) error {
if err != nil {
return fmt.Errorf("failed to promote image %q with error: %w", ri, err)
}

return nil
}

@ -96,6 +100,7 @@ func (ri *rbdImage) demoteImage() error {
if err != nil {
return fmt.Errorf("failed to demote image %q with error: %w", ri, err)
}

return nil
}

@ -110,6 +115,7 @@ func (ri *rbdImage) resyncImage() error {
if err != nil {
return fmt.Errorf("failed to resync image %q with error: %w", ri, err)
}

return nil
}

@ -148,6 +154,7 @@ func (ri *rbdImage) getImageMirroringStatus() (*imageMirrorStatus, error) {
": (2) No such file or directory") {
return nil, util.JoinErrors(ErrImageNotFound, err)
}

return nil, err
}

@ -157,5 +164,6 @@ func (ri *rbdImage) getImageMirroringStatus() (*imageMirrorStatus, error) {
return nil, fmt.Errorf("unmarshal failed (%w), raw buffer response: %s", err, stdout)
}
}

return &imgStatus, nil
}

@ -107,6 +107,7 @@ func isHealerContext(parameters map[string]string) bool {
return false
}
}

return healerContext
}

@ -121,6 +122,7 @@ func isStaticVolume(parameters map[string]string) bool {
return false
}
}

return staticVol
}

@ -130,6 +132,7 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
imgInfo, err := lookupRBDImageMetadataStash(metaDataPath)
if err != nil {
util.ErrorLog(ctx, "failed to find image metadata, at stagingPath: %s, err: %v", metaDataPath, err)

return err
}
if imgInfo.DevicePath == "" {

@ -141,6 +144,7 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
return err
}
util.DebugLog(ctx, "rbd volID: %s was successfully attached to device: %s", volOps.VolID, devicePath)

return nil
}

@ -177,6 +181,7 @@ func (ns *NodeServer) NodeStageVolume(
"invalid AccessMode for volume: %v",
req.GetVolumeId(),
)

return nil, status.Error(
codes.InvalidArgument,
"rbd: RWX access mode request is only valid for volumes with access type `block`",

@ -196,6 +201,7 @@ func (ns *NodeServer) NodeStageVolume(

if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)

@ -212,6 +218,7 @@ func (ns *NodeServer) NodeStageVolume(
return nil, status.Error(codes.Internal, err.Error())
} else if !isNotMnt {
util.DebugLog(ctx, "rbd: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)

return &csi.NodeStageVolumeResponse{}, nil
}
}

@ -241,12 +248,14 @@ func (ns *NodeServer) NodeStageVolume(
err = vi.DecomposeCSIID(volID)
if err != nil {
err = fmt.Errorf("error decoding volume ID (%s): %w", volID, err)

return nil, status.Error(codes.Internal, err.Error())
}

j, connErr := volJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if connErr != nil {
util.ErrorLog(ctx, "failed to establish cluster connection: %v", connErr)

return nil, status.Error(codes.Internal, connErr.Error())
}
defer j.Destroy()

@ -255,6 +264,7 @@ func (ns *NodeServer) NodeStageVolume(
ctx, volOptions.Pool, vi.ObjectUUID, false)
if err != nil {
err = fmt.Errorf("error fetching image attributes for volume ID (%s): %w", volID, err)

return nil, status.Error(codes.Internal, err.Error())
}
volOptions.RbdImageName = imageAttributes.ImageName

@ -268,6 +278,7 @@ func (ns *NodeServer) NodeStageVolume(
err = volOptions.Connect(cr)
if err != nil {
util.ErrorLog(ctx, "failed to connect to volume %s: %v", volOptions, err)

return nil, status.Error(codes.Internal, err.Error())
}
defer volOptions.Destroy()

@ -277,6 +288,7 @@ func (ns *NodeServer) NodeStageVolume(
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.NodeStageVolumeResponse{}, nil
}

@ -409,6 +421,7 @@ func (ns *NodeServer) stageTransaction(
// #nosec - allow anyone to write inside the target path
err = os.Chmod(stagingTargetPath, 0o777)
}

return transaction, err
}

@ -424,6 +437,7 @@ func (ns *NodeServer) undoStagingTransaction(
err = ns.mounter.Unmount(stagingTargetPath)
if err != nil {
util.ErrorLog(ctx, "failed to unmount stagingtargetPath: %s with error: %v", stagingTargetPath, err)

return
}
}

@ -458,6 +472,7 @@ func (ns *NodeServer) undoStagingTransaction(
// Cleanup the stashed image metadata
if err = cleanupRBDImageMetadataStash(req.GetStagingTargetPath()); err != nil {
util.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)

return
}
}

@ -468,10 +483,12 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
if err != nil {
util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)

return status.Error(codes.Internal, err.Error())
}
if err = pathFile.Close(); err != nil {
util.ErrorLog(ctx, "failed to close mountPath:%s with error: %v", mountPath, err)

return status.Error(codes.Internal, err.Error())
}

@ -482,6 +499,7 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
if err != nil {
if !os.IsExist(err) {
util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)

return status.Error(codes.Internal, err.Error())
}
}

@ -524,6 +542,7 @@ func (ns *NodeServer) NodePublishVolume(
}

util.DebugLog(ctx, "rbd: successfully mounted stagingPath %s to targetPath %s", stagingPath, targetPath)

return &csi.NodePublishVolumeResponse{}, nil
}

@ -548,6 +567,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
existingFormat, err := diskMounter.GetDiskFormat(devicePath)
if err != nil {
util.ErrorLog(ctx, "failed to get disk format for path %s, error: %v", devicePath, err)

return readOnly, err
}

@ -587,6 +607,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
cmdOut, cmdErr := diskMounter.Exec.Command("mkfs."+fsType, args...).CombinedOutput()
if cmdErr != nil {
util.ErrorLog(ctx, "failed to run mkfs error: %v, output: %v", cmdErr, string(cmdOut))

return readOnly, cmdErr
}
}

@ -607,6 +628,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
req.GetVolumeId(),
err)
}

return readOnly, err
}

@ -647,10 +669,12 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
if e != nil {
util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)

return notMnt, status.Error(codes.Internal, e.Error())
}
if err = pathFile.Close(); err != nil {
util.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)

return notMnt, status.Error(codes.Internal, err.Error())
}
} else {

@ -660,6 +684,7 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
}
}
notMnt = true

return notMnt, err
}

@ -680,14 +705,17 @@ func (ns *NodeServer) NodeUnpublishVolume(
if os.IsNotExist(err) {
// targetPath has already been deleted
util.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)

return &csi.NodeUnpublishVolumeResponse{}, nil
}

return nil, status.Error(codes.NotFound, err.Error())
}
if notMnt {
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.NodeUnpublishVolumeResponse{}, nil
}

@ -730,6 +758,7 @@ func (ns *NodeServer) NodeUnstageVolume(

if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)

@ -750,6 +779,7 @@ func (ns *NodeServer) NodeUnstageVolume(
err = ns.mounter.Unmount(stagingTargetPath)
if err != nil {
util.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err)

return nil, status.Error(codes.Internal, err.Error())
}
util.DebugLog(ctx, "successfully unmounted volume (%s) from staging path (%s)",

@ -762,6 +792,7 @@ func (ns *NodeServer) NodeUnstageVolume(
// error
if !os.IsNotExist(err) {
util.ErrorLog(ctx, "failed to remove staging target path (%s): (%v)", stagingTargetPath, err)

return nil, status.Error(codes.Internal, err.Error())
}
}

@ -800,6 +831,7 @@ func (ns *NodeServer) NodeUnstageVolume(
req.GetVolumeId(),
stagingTargetPath,
err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -807,6 +839,7 @@ func (ns *NodeServer) NodeUnstageVolume(

if err = cleanupRBDImageMetadataStash(stagingParentPath); err != nil {
util.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)

return nil, status.Error(codes.Internal, err.Error())
}

@ -837,6 +870,7 @@ func (ns *NodeServer) NodeExpandVolume(

if acquired := ns.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer ns.VolumeLocks.Release(volumeID)

@ -853,6 +887,7 @@ func (ns *NodeServer) NodeExpandVolume(
if !ok {
return nil, status.Errorf(codes.Internal, "rbd: resize failed on path %s, error: %v", req.GetVolumePath(), err)
}

return &csi.NodeExpandVolumeResponse{}, nil
}

@ -870,6 +905,7 @@ func getDevicePath(ctx context.Context, volumePath string) (string, error) {
if found {
return device, nil
}

return "", fmt.Errorf("failed to get device for stagingtarget path %v", volumePath)
}

@ -913,6 +949,7 @@ func (ns *NodeServer) processEncryptedDevice(
if err != nil {
util.ErrorLog(ctx, "failed to get encryption status for rbd image %s: %v",
imageSpec, err)

return "", err
}

@ -928,6 +965,7 @@ func (ns *NodeServer) processEncryptedDevice(
if err != nil {
util.ErrorLog(ctx, "failed to setup encryption for rbd"+
"image %s: %v", imageSpec, err)

return "", err
}

@ -988,11 +1026,13 @@ func (ns *NodeServer) xfsSupportsReflink() bool {
// mkfs.xfs should fail with an error message (and help text)
if strings.Contains(string(out), "reflink=0|1") {
xfsHasReflink = xfsReflinkSupport

return true
}
}

xfsHasReflink = xfsReflinkNoSupport

return false
}

@ -1004,6 +1044,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
targetPath := req.GetVolumePath()
if targetPath == "" {
err = fmt.Errorf("targetpath %v is empty", targetPath)

return nil, status.Error(codes.InvalidArgument, err.Error())
}

@ -1033,6 +1074,7 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
if err != nil {
err = fmt.Errorf("lsblk %v returned an error: %w", args, err)
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -1040,6 +1082,7 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
if err != nil {
err = fmt.Errorf("failed to convert %q to bytes: %w", lsblkSize, err)
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -140,6 +140,7 @@ func findDeviceMappingImage(ctx context.Context, pool, namespace, image string,
rbdDeviceList, err := rbdGetDeviceList(ctx, accessType)
if err != nil {
util.WarningLog(ctx, "failed to determine if image (%s) is mapped to a device (%v)", imageSpec, err)

return "", false
}

@ -177,14 +178,17 @@ func checkRbdNbdTools() bool {
_, _, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd)
if err != nil {
util.ExtendedLogMsg("rbd-nbd: nbd modprobe failed with error %v", err)

return false
}
}
if _, _, err := util.ExecCommand(context.TODO(), rbdTonbd, "--version"); err != nil {
util.ExtendedLogMsg("rbd-nbd: running rbd-nbd --version failed with error %v", err)

return false
}
util.ExtendedLogMsg("rbd-nbd tools were found.")

return true
}

@ -325,6 +329,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
}
}

return "", fmt.Errorf("rbd: map failed with error %w, rbd error output: %s", err, stderr)
}
devicePath := strings.TrimSuffix(stdout, "\n")

@ -342,8 +347,10 @@ func waitForrbdImage(ctx context.Context, backoff wait.Backoff, volOptions *rbdV
}
if (volOptions.DisableInUseChecks) && (used) {
util.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result")

return used, nil
}

return !used, nil
})
// return error if rbd image has not become available for the specified timeout

@ -376,6 +383,7 @@ func detachRBDImageOrDeviceSpec(
if err != nil {
util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
mapperPath, imageOrDeviceSpec, err)

return err
}
if len(mapper) > 0 {

@ -384,6 +392,7 @@ func detachRBDImageOrDeviceSpec(
if err != nil {
util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
mapperPath, imageOrDeviceSpec, err)

return err
}
imageOrDeviceSpec = mappedDevice

@ -402,8 +411,10 @@ func detachRBDImageOrDeviceSpec(
strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, imageOrDeviceSpec))) {
// Devices found not to be mapped are treated as a successful detach
util.TraceLog(ctx, "image or device spec (%s) not mapped", imageOrDeviceSpec)

return nil
}

return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", imageOrDeviceSpec, err, stderr)
}

@ -44,6 +44,7 @@ func accessModeStrToInt(mode v1.PersistentVolumeAccessMode) csi.VolumeCapability
case v1.ReadWriteMany:
return csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}

return csi.VolumeCapability_AccessMode_UNKNOWN
}

@ -54,6 +55,7 @@ func getSecret(c *k8s.Clientset, ns, name string) (map[string]string, error) {
secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
util.ErrorLogMsg("get secret failed, err: %v", err)

return nil, err
}

@ -78,6 +80,7 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Name)
if err != nil {
util.ErrorLogMsg("getSecret failed for volID: %s, err: %v", volID, err)

return err
}

@ -113,6 +116,7 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
if err != nil {
util.ErrorLogMsg("nodeStageVolume request failed, volID: %s, stagingPath: %s, err: %v",
volID, stagingParentPath, err)

return err
}

@ -125,6 +129,7 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
val, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
if err != nil {
util.ErrorLogMsg("list volumeAttachments failed, err: %v", err)

return err
}

@ -142,6 +147,7 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
if !apierrors.IsNotFound(err) {
util.ErrorLogMsg("get persistentVolumes failed for pv: %s, err: %v", pvName, err)
}

continue
}
// TODO: check with pv delete annotations, for eg: what happens when the pv is marked for delete

@ -162,6 +168,7 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
util.ErrorLogMsg("get volumeAttachments failed for volumeAttachment: %s, volID: %s, err: %v",
val.Items[i].Name, pv.Spec.PersistentVolumeSource.CSI.VolumeHandle, err)
}

continue
}
if !va.Status.Attached {

@ -168,11 +168,13 @@ func checkSnapCloneExists(
if err != nil {
if !errors.Is(err, ErrSnapNotFound) {
util.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)

return false, err
}
}
err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)
}

return false, err
}

@ -198,6 +200,7 @@ func checkSnapCloneExists(
if sErr != nil {
util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

return false, err
}
}

@ -210,18 +213,21 @@ func checkSnapCloneExists(
if sErr != nil {
util.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

return false, err
}
sErr = j.StoreImageID(ctx, vol.JournalPool, vol.ReservedID, vol.ImageID)
if sErr != nil {
util.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

return false, err
}
}

util.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
rbdSnap.VolID, rbdSnap.RbdSnapName, rbdSnap.RequestName)

return true, nil
}

@ -299,8 +305,10 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
}
err = j.UndoReservation(ctx, rv.JournalPool, rv.Pool,
rv.RbdImageName, rv.RequestName)

return false, err
}

return false, err
}

@ -327,6 +335,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
err = parentVol.copyEncryptionConfig(&rv.rbdImage)
if err != nil {
util.ErrorLog(ctx, err.Error())

return false, err
}
}

@ -348,11 +357,13 @@ func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection) e
err := rv.getImageID()
if err != nil {
util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)

return err
}
err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
if err != nil {
util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)

return err
}

@ -544,6 +555,7 @@ func RegenerateJournal(
rbdVol.Monitors, _, err = util.GetMonsAndClusterID(options)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return "", err
}

@ -593,6 +605,7 @@ func RegenerateJournal(
if err != nil {
return "", err
}

return rbdVol.VolID, nil
}

@ -635,12 +648,15 @@ func (rv *rbdVolume) storeImageID(ctx context.Context, j *journal.Connection) er
err := rv.getImageID()
if err != nil {
util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)

return err
}
err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
if err != nil {
util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)

return err
}

return nil
}

@ -190,6 +190,7 @@ func (ri *rbdImage) Connect(cr *util.Credentials) error {
}

ri.conn = conn

return nil
}

@ -212,6 +213,7 @@ func (ri *rbdImage) String() string {
if ri.RadosNamespace != "" {
return fmt.Sprintf("%s/%s/%s", ri.Pool, ri.RadosNamespace, ri.RbdImageName)
}

return fmt.Sprintf("%s/%s", ri.Pool, ri.RbdImageName)
}

@ -220,6 +222,7 @@ func (rs *rbdSnapshot) String() string {
if rs.RadosNamespace != "" {
return fmt.Sprintf("%s/%s/%s@%s", rs.Pool, rs.RadosNamespace, rs.RbdImageName, rs.RbdSnapName)
}

return fmt.Sprintf("%s/%s@%s", rs.Pool, rs.RbdImageName, rs.RbdSnapName)
}

@ -275,6 +278,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
// nolint:errcheck // deleteImage() will log errors in
// case it fails, no need to log them here again
_ = deleteImage(ctx, pOpts, cr)

return fmt.Errorf("failed to thick provision image: %w", err)
}

@ -283,6 +287,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
// nolint:errcheck // deleteImage() will log errors in
// case it fails, no need to log them here again
_ = deleteImage(ctx, pOpts, cr)

return fmt.Errorf("failed to mark image as thick-provisioned: %w", err)
}
}

@ -324,6 +329,7 @@ func (rv *rbdVolume) getImageID() error {
return err
}
rv.ImageID = id

return nil
}

@ -342,8 +348,10 @@ func (ri *rbdImage) open() (*librbd.Image, error) {
if errors.Is(err, librbd.ErrNotFound) {
err = util.JoinErrors(ErrImageNotFound, err)
}

return nil, err
}

return image, nil
}

@ -454,6 +462,7 @@ func (rv *rbdVolume) isInUse() (bool, error) {
// mirror daemon for mirrored images.
defaultWatchers++
}

return len(watchers) > defaultWatchers, nil
}

@ -471,6 +480,7 @@ func isNotMountPoint(mounter mount.Interface, stagingTargetPath string) (bool, e
if os.IsNotExist(err) {
err = nil
}

return isNotMnt, err
}

@ -509,6 +519,7 @@ func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (boo
if err != nil {
err = fmt.Errorf("%w. stdError:%s", err, stderr)
}

return supported, err
}

@ -540,6 +551,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
err = rbdImage.Trash(0)
if err != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", pOpts, err)

return err
}

@ -554,6 +566,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
rbdCephMgrSupported, err := addRbdManagerTask(ctx, pOpts, args)
if rbdCephMgrSupported && err != nil {
util.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", pOpts, err)

return err
}

@ -561,6 +574,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
err = librbd.TrashRemove(pOpts.ioctx, pOpts.ImageID, true)
if err != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s, %v", pOpts, err)

return err
}
}

@ -599,6 +613,7 @@ func (rv *rbdVolume) getCloneDepth(ctx context.Context) (uint, error) {
return depth, nil
}
util.ErrorLog(ctx, "failed to check depth on image %s: %s", &vol, err)

return depth, err
}
if vol.ParentName != "" {

@ -627,6 +642,7 @@ func flattenClonedRbdImages(
err := rv.Connect(cr)
if err != nil {
util.ErrorLog(ctx, "failed to open connection %s; err %v", rv, err)

return err
}
var origNameList []trashSnapInfo

@ -652,9 +668,11 @@ func flattenClonedRbdImages(
err = rv.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
util.ErrorLog(ctx, "failed to flatten %s; err %v", rv, err)

continue
}
}

return nil
}

@ -694,6 +712,7 @@ func (rv *rbdVolume) flattenRbdImage(
return nil
}
util.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)

return err
}
if forceFlatten || depth >= hardlimit {

@ -713,6 +732,7 @@ func (rv *rbdVolume) flattenRbdImage(
err := rv.flatten()
if err != nil {
util.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)

return err
}
}

@ -732,6 +752,7 @@ func (rv *rbdVolume) getParentName() (string, error) {
if err != nil {
return "", err
}

return parentInfo.Image.ImageName, nil
}

@ -753,6 +774,7 @@ func (rv *rbdVolume) flatten() error {
return nil
}
}

return nil
}

@ -792,6 +814,7 @@ func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint
return false, nil
}
util.ErrorLog(ctx, "failed to get image info for %s: %s", vol.String(), err)

return false, err
}
if f := vol.hasFeature(feature); f {

@ -821,6 +844,7 @@ func genSnapFromSnapID(
err := vi.DecomposeCSIID(rbdSnap.VolID)
if err != nil {
util.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID)

return err
}

@ -830,6 +854,7 @@ func genSnapFromSnapID(
rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(options)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return err
}

@ -925,6 +950,7 @@ func generateVolumeFromVolumeID(
rbdVol.Monitors, _, err = util.GetMonsAndClusterID(options)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return rbdVol, err
}

@ -983,6 +1009,7 @@ func generateVolumeFromVolumeID(
}
}
err = rbdVol.getImageInfo()

return rbdVol, err
}

@ -1016,10 +1043,12 @@ func genVolFromVolID(
if pvlist.Items[i].Spec.CSI != nil && pvlist.Items[i].Spec.CSI.VolumeHandle == volumeID {
if v, ok := pvlist.Items[i].Annotations[PVVolumeHandleAnnotationKey]; ok {
util.UsefulLog(ctx, "found new volumeID %s for existing volumeID %s", v, volumeID)

return generateVolumeFromVolumeID(ctx, v, cr, secrets)
}
}
}

return vol, err
}

@ -1047,6 +1076,7 @@ func genVolFromVolumeOptions(
rbdVol.Monitors, rbdVol.ClusterID, err = util.GetMonsAndClusterID(volOptions)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return nil, err
}

@ -1061,6 +1091,7 @@ func genVolFromVolumeOptions(
// which disable all RBD image features as we expected
if err = rbdVol.validateImageFeatures(volOptions["imageFeatures"]); err != nil {
util.ErrorLog(ctx, "failed to validate image features %v", err)

return nil, err
}

@ -1107,6 +1138,7 @@ func (rv *rbdVolume) validateImageFeatures(imageFeatures string) error {
}
}
rv.imageFeatureSet = librbd.FeatureSetFromNames(arr)

return nil
}

@ -1121,6 +1153,7 @@ func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[
rbdSnap.Monitors, rbdSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
if err != nil {
util.ErrorLog(ctx, "failed getting mons (%s)", err)

return nil, err
}

@ -1145,6 +1178,7 @@ func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) err
defer image.Close()

_, err = image.CreateSnapshot(pOpts.RbdSnapName)

return err
}

@ -1164,6 +1198,7 @@ func (rv *rbdVolume) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) err
if errors.Is(err, librbd.ErrNotFound) {
return util.JoinErrors(ErrSnapNotFound, err)
}

return err
}

@ -1299,6 +1334,7 @@ func (rv *rbdVolume) getImageInfo() error {
return err
}
rv.CreatedAt = protoTime

return nil
}

@ -1332,6 +1368,7 @@ func (rv *rbdVolume) updateVolWithImageInfo() error {
": (2) No such file or directory") {
return util.JoinErrors(ErrImageNotFound, err)
}

return err
}

@ -1342,6 +1379,7 @@ func (rv *rbdVolume) updateVolWithImageInfo() error {
}
rv.Primary = imgInfo.Mirroring.Primary
}

return nil
}

@ -1391,6 +1429,7 @@ func (ri *rbdImageMetadataStash) String() string {
if ri.RadosNamespace != "" {
return fmt.Sprintf("%s/%s/%s", ri.Pool, ri.RadosNamespace, ri.ImageName)
}

return fmt.Sprintf("%s/%s", ri.Pool, ri.ImageName)
}

@ -1471,6 +1510,7 @@ func updateRBDImageMetadataStash(metaDataPath, device string) error {
return fmt.Errorf("failed to stash JSON image metadata at path: (%s) for spec:(%s) : %w",
fPath, imgMeta.String(), err)
}

return nil
}

@ -1522,6 +1562,7 @@ func (rv *rbdVolume) resize(newSize int64) error {
if resizeErr != nil {
err = fmt.Errorf("failed to shrink image (%v) after failed allocation: %w", resizeErr, err)
}

return err
}
}

@ -1605,6 +1646,7 @@ func (ri *rbdImage) isThickProvisioned() (bool, error) {
if err != nil {
return false, fmt.Errorf("failed to convert %q=%q to a boolean: %w", thickProvisionMetaKey, value, err)
}

return thick, nil
}

@ -1699,6 +1741,7 @@ func (rv *rbdVolume) listSnapshots() ([]librbd.SnapInfo, error) {
if err != nil {
return nil, err
}

return snapInfoList, nil
}

@ -1719,6 +1762,7 @@ func (rv *rbdVolume) isTrashSnap(snapID uint64) (bool, error) {
if nsType == librbd.SnapNamespaceTypeTrash {
return true, nil
}

return false, nil
}

@ -1830,5 +1874,6 @@ func (ri *rbdImage) addSnapshotScheduling(
if err != nil {
return err
}

return nil
}

@ -131,6 +131,7 @@ func TestValidateImageFeatures(t *testing.T) {
err := test.rbdVol.validateImageFeatures(test.imageFeatures)
if test.isErr {
assert.EqualError(t, err, test.errMsg)

continue
}
assert.Nil(t, err)

@ -90,12 +90,14 @@ func getForceOption(ctx context.Context, parameters map[string]string) (bool, er
val, ok := parameters[forceKey]
if !ok {
util.WarningLog(ctx, "%s is not set in parameters, setting to default (%v)", forceKey, false)

return false, nil
}
force, err := strconv.ParseBool(val)
if err != nil {
return false, status.Errorf(codes.Internal, err.Error())
}

return force, nil
}

@ -109,6 +111,7 @@ func getMirroringMode(ctx context.Context, parameters map[string]string) (librbd
"%s is not set in parameters, setting to mirroringMode to default (%s)",
imageMirroringKey,
imageMirrorModeSnapshot)

return librbd.ImageMirrorModeSnapshot, nil
}

@ -119,6 +122,7 @@ func getMirroringMode(ctx context.Context, parameters map[string]string) (librbd
default:
return mirroringMode, status.Errorf(codes.InvalidArgument, "%s %s not supported", imageMirroringKey, val)
}

return mirroringMode, nil
}

@ -159,6 +163,7 @@ func getSchedulingDetails(parameters map[string]string) (admin.Interval, admin.S
return admInt, admin.NoStartTime, status.Error(codes.InvalidArgument, err.Error())
}
}

return admInt, adminStartTime, nil
}

@ -169,6 +174,7 @@ func validateSchedulingInterval(interval string) (admin.Interval, error) {
if re.MatchString(interval) {
return admin.Interval(interval), nil
}

return "", errors.New("interval specified without d, h, m suffix")
}

@ -195,6 +201,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,

if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)

@ -210,6 +217,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}
// extract the mirroring mode

@ -221,6 +229,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
mirroringInfo, err := rbdVol.getImageMirroringInfo()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -228,6 +237,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
err = rbdVol.enableImageMirroring(mirroringMode)
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
}

@ -266,6 +276,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,

if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)

@ -281,6 +292,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}
// extract the force option

@ -292,6 +304,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
mirroringInfo, err := rbdVol.getImageMirroringInfo()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -308,6 +321,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
err = rbdVol.disableImageMirroring(force)
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
// the image state can be still disabling once we disable the mirroring

@ -315,11 +329,13 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
mirroringInfo, err = rbdVol.getImageMirroringInfo()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
if mirroringInfo.State == librbd.MirrorImageDisabling {
return nil, status.Errorf(codes.Aborted, "%s is in disabling state", volumeID)
}

return &replication.DisableVolumeReplicationResponse{}, nil
default:
// TODO: use string instead of int for returning valid error message

@ -348,6 +364,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,

if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)

@ -363,12 +380,14 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -394,6 +413,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
if strings.Contains(err.Error(), "Device or resource busy") {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}

return nil, status.Error(codes.Internal, err.Error())
}
}

@ -420,6 +440,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,

if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)

@ -435,11 +456,13 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}
mirroringInfo, err := rbdVol.getImageMirroringInfo()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}

@ -456,9 +479,11 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
err = rbdVol.demoteImage()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
}

return &replication.DemoteVolumeResponse{}, nil
}

@ -481,6 +506,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,

if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)

@ -495,6 +521,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
default:
err = status.Errorf(codes.Internal, err.Error())
}

return nil, err
}

@ -503,6 +530,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
// in case of Resync the image will get deleted and gets recreated and
// it takes time for this operation.
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Aborted, err.Error())
}

@ -523,9 +551,11 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
resp := &replication.ResyncVolumeResponse{
Ready: false,
}

return resp, nil
}
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
ready := false

@ -561,6 +591,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
err = rbdVol.resyncImage()
if err != nil {
util.ErrorLog(ctx, err.Error())

return nil, status.Error(codes.Internal, err.Error())
}
}

@ -574,5 +605,6 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
resp := &replication.ResyncVolumeResponse{
Ready: ready,
}

return resp, nil
}

@ -69,6 +69,7 @@ func TestValidateSchedulingInterval(t *testing.T) {
got, err := validateSchedulingInterval(tt.interval)
if (err != nil) != tt.wantErr {
t.Errorf("validateSchedulingInterval() error = %v, wantErr %v", err, tt.wantErr)

return
}
if !reflect.DeepEqual(got, tt.want) {

@ -145,6 +146,7 @@ func TestGetSchedulingDetails(t *testing.T) {
interval, startTime, err := getSchedulingDetails(tt.parameters)
if (err != nil) != tt.wantErr {
t.Errorf("getSchedulingDetails() error = %v, wantErr %v", err, tt.wantErr)

return
}
if !reflect.DeepEqual(interval, tt.wantInterval) {

@ -32,6 +32,7 @@ func createRBDClone(
err := parentVol.createSnapshot(ctx, snap)
if err != nil {
util.ErrorLog(ctx, "failed to create snapshot %s: %v", snap, err)

return err
}

@ -58,6 +59,7 @@ func createRBDClone(
if delErr != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
}

return err
}

@ -68,6 +70,7 @@ func createRBDClone(
if delErr != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
}

return err
}

@ -86,6 +89,7 @@ func cleanUpSnapshot(
if err != nil {
if !errors.Is(err, ErrSnapNotFound) {
util.ErrorLog(ctx, "failed to delete snapshot %q: %v", rbdSnap, err)

return err
}
}

@ -95,6 +99,7 @@ func cleanUpSnapshot(
if err != nil {
if !errors.Is(err, ErrImageNotFound) {
util.ErrorLog(ctx, "failed to delete rbd image %q with error: %v", rbdVol, err)

return err
}
}

@ -117,6 +122,7 @@ func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume {
// snapshot will have the same volumeID which cases the panic in
// copyEncryptionConfig function.
vol.encryption = rbdSnap.encryption

return vol
}

@ -129,8 +135,10 @@ func undoSnapshotCloning(
err := cleanUpSnapshot(ctx, parentVol, rbdSnap, cloneVol, cr)
if err != nil {
util.ErrorLog(ctx, "failed to clean up %s or %s: %v", cloneVol, rbdSnap, err)

return err
}
err = undoSnapReservation(ctx, rbdSnap, cr)

return err
}

@ -52,6 +52,7 @@ func ExecCommand(ctx context.Context, program string, args ...string) (string, s
if ctx != context.TODO() {
UsefulLog(ctx, "%s", err)
}

return stdout, stderr, err
}

@ -98,6 +99,7 @@ func GetPoolName(monitors string, cr *Credentials, poolID int64) (string, error)
} else if err != nil {
return "", fmt.Errorf("failed to get pool ID %d: %w", poolID, err)
}

return name, nil
}

@ -136,6 +138,7 @@ func CreateObject(ctx context.Context, monitors string, cr *Credentials, poolNam
if errors.Is(err, ErrPoolNotFound) {
err = JoinErrors(ErrObjectNotFound, err)
}

return err
}
defer ioctx.Destroy()

@ -149,6 +152,7 @@ func CreateObject(ctx context.Context, monitors string, cr *Credentials, poolNam
return JoinErrors(ErrObjectExists, err)
} else if err != nil {
ErrorLog(ctx, "failed creating omap (%s) in pool (%s): (%v)", objectName, poolName, err)

return err
}

@ -170,6 +174,7 @@ func RemoveObject(ctx context.Context, monitors string, cr *Credentials, poolNam
if errors.Is(err, ErrPoolNotFound) {
err = JoinErrors(ErrObjectNotFound, err)
}

return err
}
defer ioctx.Destroy()

@ -183,6 +188,7 @@ func RemoveObject(ctx context.Context, monitors string, cr *Credentials, poolNam
return JoinErrors(ErrObjectNotFound, err)
} else if err != nil {
ErrorLog(ctx, "failed removing omap (%s) in pool (%s): (%v)", oMapName, poolName, err)

return err
}

@ -72,5 +72,6 @@ if any ceph commands fails it will log below error message
// createKeyRingFile creates the keyring files to fix above error message logging.
func createKeyRingFile() error {
_, err := os.Create(keyRing)

return err
}

@ -111,8 +111,10 @@ func (cp *ConnPool) getConn(unique string) *rados.Conn {
|
||||
ce, exists := cp.conns[unique]
|
||||
if exists {
|
||||
ce.get()
|
||||
|
||||
return ce.conn
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -159,6 +161,7 @@ func (cp *ConnPool) Get(monitors, user, keyfile string) (*rados.Conn, error) {
	if oldConn := cp.getConn(unique); oldConn != nil {
		// there was a race, oldConn already exists
		ce.destroy()

		return oldConn, nil
	}
	// this really is a new connection, add it to the map

@ -176,6 +179,7 @@ func (cp *ConnPool) Copy(conn *rados.Conn) *rados.Conn {
	for _, ce := range cp.conns {
		if ce.conn == conn {
			ce.get()

			return ce.conn
		}
	}

@ -192,6 +196,7 @@ func (cp *ConnPool) Put(conn *rados.Conn) {
	for _, ce := range cp.conns {
		if ce.conn == conn {
			ce.put()

			return
		}
	}

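The ConnPool hunks all touch the same reference-counting scheme: getConn/Copy bump a per-entry user count and Put drops it. A minimal sketch of that idiom under a plain mutex, with illustrative names (the real pool stores *rados.Conn and destroys idle connections elsewhere):

package main

import "sync"

// entry is a reference-counted pool entry.
type entry struct {
	users int
	value string // stands in for *rados.Conn
}

type pool struct {
	mu    sync.Mutex
	items map[string]*entry
}

// get returns an existing entry and bumps its refcount, or nil.
func (p *pool) get(key string) *entry {
	p.mu.Lock()
	defer p.mu.Unlock()
	if e, ok := p.items[key]; ok {
		e.users++

		return e
	}

	return nil
}

// put drops one reference; the last user removes the entry.
func (p *pool) put(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if e, ok := p.items[key]; ok {
		e.users--
		if e.users <= 0 {
			delete(p.items, key)
		}
	}
}

func main() {
	p := &pool{items: map[string]*entry{"mon-a": {users: 1, value: "conn"}}}
	if e := p.get("mon-a"); e != nil { // refcount now 2
		p.put("mon-a") // back to 1
	}
}
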
@ -65,6 +65,7 @@ func (cp *ConnPool) fakeGet(monitors, user, keyfile string) (*rados.Conn, string
	if oldConn := cp.getConn(unique); oldConn != nil {
		// there was a race, oldConn already exists
		ce.destroy()

		return oldConn, unique, nil
	}
	// this really is a new connection, add it to the map

@ -83,6 +84,7 @@ func TestConnPool(t *testing.T) {
	err := ioutil.WriteFile(keyfile, []byte("the-key"), 0o600)
	if err != nil {
		t.Errorf("failed to create keyfile: %v", err)

		return
	}
	defer os.Remove(keyfile)

@ -98,6 +98,7 @@ func (cc *ClusterConnection) GetIoctx(pool string) (*rados.IOContext, error) {
		} else {
			err = fmt.Errorf("failed to open IOContext for pool %s: %w", pool, err)
		}

		return nil, err
	}

@ -137,5 +138,6 @@ func (cc *ClusterConnection) DisableDiscardOnZeroedWriteSame() error {
	}

	cc.discardOnZeroedWriteSameDisabled = true

	return nil
}

@ -58,6 +58,7 @@ func storeKey(key string) (string, error) {
	keyFile := tmpfile.Name()
	if keyFile == "" {
		err = fmt.Errorf("error reading temporary filename for key: %w", err)

		return "", err
	}

@ -115,5 +116,6 @@ func GetMonValFromSecret(secrets map[string]string) (string, error) {
	if mons, ok := secrets[credMonitors]; ok {
		return mons, nil
	}

	return "", fmt.Errorf("missing %q", credMonitors)
}

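storeKey above persists a credential to a temporary file so ceph CLI tools can consume it as a keyfile path. A hedged sketch of that flow with stdlib calls only; storeKeyTemp, its name pattern, and its error text are inventions for the example:

package main

import (
	"fmt"
	"os"
)

func storeKeyTemp(key string) (string, error) {
	tmp, err := os.CreateTemp("", "keyfile-*")
	if err != nil {
		return "", fmt.Errorf("error creating a temporary keyfile: %w", err)
	}
	defer tmp.Close()

	if _, err = tmp.WriteString(key); err != nil {
		os.Remove(tmp.Name()) // do not leave a partial secret behind

		return "", fmt.Errorf("error writing key to %q: %w", tmp.Name(), err)
	}

	return tmp.Name(), nil
}

func main() {
	keyFile, err := storeKeyTemp("AQD-example-key")
	if err == nil {
		defer os.Remove(keyFile) // callers must clean up the keyfile
		fmt.Println("key stored at", keyFile)
	}
}
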
@ -87,6 +87,7 @@ func NewVolumeEncryption(id string, kms EncryptionKMS) (*VolumeEncryption, error
	}

	ve.dekStore = dekStore

	return ve, nil
}

@ -196,6 +197,7 @@ func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) e
	if err != nil {
		return fmt.Errorf("failed to save the passphrase for %s: %w", volumeID, err)
	}

	return nil
}

@ -226,6 +228,7 @@ func generateNewEncryptionPassphrase() (string, error) {
	if err != nil {
		return "", err
	}

	return base64.URLEncoding.EncodeToString(bytesPassphrase), nil
}

@ -233,6 +236,7 @@ func generateNewEncryptionPassphrase() (string, error) {
func VolumeMapper(volumeID string) (mapperFile, mapperFilePath string) {
	mapperFile = mapperFilePrefix + volumeID
	mapperFilePath = path.Join(mapperFilePathPrefix, mapperFile)

	return mapperFile, mapperFilePath
}

@ -242,6 +246,7 @@ func EncryptVolume(ctx context.Context, devicePath, passphrase string) error {
	if _, _, err := LuksFormat(devicePath, passphrase); err != nil {
		return fmt.Errorf("failed to encrypt device %s with LUKS: %w", devicePath, err)
	}

	return nil
}

@ -252,6 +257,7 @@ func OpenEncryptedVolume(ctx context.Context, devicePath, mapperFile, passphrase
	if err != nil {
		WarningLog(ctx, "failed to open LUKS device %q: %s", devicePath, stderr)
	}

	return err
}

@ -259,12 +265,14 @@ func OpenEncryptedVolume(ctx context.Context, devicePath, mapperFile, passphrase
func CloseEncryptedVolume(ctx context.Context, mapperFile string) error {
	DebugLog(ctx, "Closing LUKS device %s", mapperFile)
	_, _, err := LuksClose(mapperFile)

	return err
}

// IsDeviceOpen determines if encrypted device is already open.
func IsDeviceOpen(ctx context.Context, device string) (bool, error) {
	_, mappedFile, err := DeviceEncryptionStatus(ctx, device)

	return (mappedFile != ""), err
}

@ -279,6 +287,7 @@ func DeviceEncryptionStatus(ctx context.Context, devicePath string) (mappedDevic
	stdout, _, err := LuksStatus(mapPath)
	if err != nil {
		DebugLog(ctx, "device %s is not an active LUKS device: %v", devicePath, err)

		return devicePath, "", nil
	}
	lines := strings.Split(string(stdout), "\n")

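The LUKS hunks wrap cryptsetup: format, open, close, and a status probe whose output decides whether a mapping is active. A rough sketch of the status probe; the mapper-name prefix and the parsed substring are assumptions for illustration, not the driver's exact values:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// isMapperOpen asks cryptsetup whether the dm-crypt mapping for a
// volume is active, mirroring the IsDeviceOpen flow above.
func isMapperOpen(volumeID string) bool {
	mapperFile := "luks-" + volumeID // assumed mapper name prefix

	// "cryptsetup status <name>" exits non-zero when the mapping is
	// not active; treat that as "not open" rather than a hard error.
	out, err := exec.Command("cryptsetup", "status", mapperFile).CombinedOutput()
	if err != nil {
		return false
	}

	return strings.Contains(string(out), "is active")
}

func main() {
	fmt.Println("mapping open:", isMapperOpen("test-volume"))
}
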
@ -72,6 +72,7 @@ func readClusterInfo(pathToConfig, clusterID string) (*ClusterInfo, error) {
	content, err := ioutil.ReadFile(pathToConfig)
	if err != nil {
		err = fmt.Errorf("error fetching configuration for cluster ID %q: %w", clusterID, err)

		return nil, err
	}

@ -100,6 +101,7 @@ func Mons(pathToConfig, clusterID string) (string, error) {
	if len(cluster.Monitors) == 0 {
		return "", fmt.Errorf("empty monitor list for cluster ID (%s) in config", clusterID)
	}

	return strings.Join(cluster.Monitors, ","), nil
}

@ -109,6 +111,7 @@ func RadosNamespace(pathToConfig, clusterID string) (string, error) {
	if err != nil {
		return "", err
	}

	return cluster.RadosNamespace, nil
}

@ -122,6 +125,7 @@ func CephFSSubvolumeGroup(pathToConfig, clusterID string) (string, error) {
	if cluster.CephFS.SubvolumeGroup == "" {
		return defaultCsiSubvolumeGroup, nil
	}

	return cluster.CephFS.SubvolumeGroup, nil
}

@ -14,6 +14,7 @@ import (
// ValidateURL validates the url.
func ValidateURL(c *Config) error {
	_, err := url.Parse(c.MetricsPath)

	return err
}

@ -51,6 +51,7 @@ func (vl *VolumeLocks) TryAcquire(volumeID string) bool {
		return false
	}
	vl.locks.Insert(volumeID)

	return true
}

@ -96,6 +97,7 @@ func NewOperationLock() *OperationLock {
	lock[cloneOpt] = make(map[string]int)
	lock[restoreOp] = make(map[string]int)
	lock[expandOp] = make(map[string]int)

	return &OperationLock{
		locks: lock,
	}

@ -176,6 +178,7 @@ func (ol *OperationLock) tryAcquire(op operation, volumeID string) error {
	default:
		return fmt.Errorf("%v operation not supported", op)
	}

	return nil
}

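The lock hunks deal with two layers: VolumeLocks (one in-flight operation per volume ID) and OperationLock (counted clone/restore/expand operations). A minimal sketch of the first layer with a plain map and mutex; the real implementation uses a string set from the Kubernetes utilities:

package main

import (
	"fmt"
	"sync"
)

type volumeLocks struct {
	mu    sync.Mutex
	locks map[string]struct{}
}

func newVolumeLocks() *volumeLocks {
	return &volumeLocks{locks: make(map[string]struct{})}
}

// tryAcquire succeeds only if no operation is in flight for the volume.
func (vl *volumeLocks) tryAcquire(volumeID string) bool {
	vl.mu.Lock()
	defer vl.mu.Unlock()
	if _, busy := vl.locks[volumeID]; busy {
		return false
	}
	vl.locks[volumeID] = struct{}{}

	return true
}

func (vl *volumeLocks) release(volumeID string) {
	vl.mu.Lock()
	defer vl.mu.Unlock()
	delete(vl.locks, volumeID)
}

func main() {
	vl := newVolumeLocks()
	fmt.Println(vl.tryAcquire("vol-1")) // true
	fmt.Println(vl.tryAcquire("vol-1")) // false, already held
	vl.release("vol-1")
}
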
@ -44,5 +44,6 @@ func NewK8sClient() *k8s.Clientset {
	if err != nil {
		FatalLogMsg("Failed to create client with error: %v\n", err)
	}

	return client
}

@ -181,6 +181,7 @@ func getKMSProvider(config map[string]interface{}) (string, error) {
			return "", fmt.Errorf("could not convert KMS provider"+
				"type (%v) to string", providerName)
		}

		return name, nil
	}

@ -191,6 +192,7 @@ func getKMSProvider(config map[string]interface{}) (string, error) {
			return "", fmt.Errorf("could not convert KMS provider"+
				"type (%v) to string", providerName)
		}

		return name, nil
	}

@ -49,6 +49,7 @@ func Log(ctx context.Context, format string) string {
		return a + format
	}
	a += fmt.Sprintf("Req-ID: %v ", reqID)

	return a + format
}

@ -53,6 +53,7 @@ func getCgroupPidsFile() (string, error) {
		}
		if parts[1] == "pids" {
			slice = parts[2]

			break
		}
	}

@ -61,6 +62,7 @@ func getCgroupPidsFile() (string, error) {
	}

	pidsMax := fmt.Sprintf(sysPidsMaxFmt, slice)

	return pidsMax, nil
}

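getCgroupPidsFile locates the pids cgroup slice of the current process and formats the pids.max path that GetPIDLimit/SetPIDLimit read and write. A sketch of that lookup assuming the cgroup v1 layout the original targets; the format string mirrors the sysPidsMaxFmt shown above but is restated here as an assumption:

package main

import (
	"fmt"
	"os"
	"strings"
)

func pidsMaxPath() (string, error) {
	data, err := os.ReadFile("/proc/self/cgroup")
	if err != nil {
		return "", err
	}

	for _, line := range strings.Split(string(data), "\n") {
		// each line looks like "7:pids:/user.slice/..."
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			continue
		}
		if parts[1] == "pids" {
			return fmt.Sprintf("/sys/fs/cgroup/pids%s/pids.max", parts[2]), nil
		}
	}

	return "", fmt.Errorf("pids controller not found in /proc/self/cgroup")
}

func main() {
	p, err := pidsMaxPath()
	fmt.Println(p, err)
}
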
@ -91,6 +93,7 @@ func GetPIDLimit() (int, error) {
			return 0, err
		}
	}

	return maxPids, nil
}

@ -115,6 +118,7 @@ func SetPIDLimit(limit int) error {
	_, err = f.WriteString(limitStr)
	if err != nil {
		f.Close() // #nosec: a write error will be more useful to return

		return err
	}

@ -64,6 +64,7 @@ func initSecretsKMS(secrets map[string]string) (EncryptionKMS, error) {
	if !ok {
		return nil, errors.New("missing encryption passphrase in secrets")
	}

	return SecretsKMS{passphrase: passphraseValue}, nil
}

@ -267,6 +268,7 @@ func generateCipher(passphrase, salt string) (cipher.AEAD, error) {
	if err != nil {
		return nil, err
	}

	return aead, nil
}

@ -48,11 +48,13 @@ func stripKey(out []string) bool {
	for i := range out {
		if strings.HasPrefix(out[i], keyArg) {
			out[i] = strippedKey

			return true
		}

		if strings.HasPrefix(out[i], keyFileArg) {
			out[i] = strippedKeyFile

			return true
		}
	}

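stripKey above redacts --key/--keyfile arguments before a command line is logged. A sketch of the same masking; the exact argument spellings and the placeholder text are assumptions for the example:

package main

import (
	"fmt"
	"strings"
)

// maskKeyArgs replaces any secret-bearing argument with a placeholder
// so the command line is safe to log.
func maskKeyArgs(args []string) []string {
	masked := make([]string, len(args))
	for i, arg := range args {
		switch {
		case strings.HasPrefix(arg, "--key="):
			masked[i] = "--key=***stripped***"
		case strings.HasPrefix(arg, "--keyfile="):
			masked[i] = "--keyfile=***stripped***"
		default:
			masked[i] = arg
		}
	}

	return masked
}

func main() {
	fmt.Println(maskKeyArgs([]string{"rbd", "map", "--key=AQD-secret"}))
}
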
@ -102,6 +102,7 @@ func GetTopologyFromDomainLabels(domainLabels, nodeName, driverName string) (map
			missingLabels = append(missingLabels, key)
		}
	}

	return nil, fmt.Errorf("missing domain labels %v on node %q", missingLabels, nodeName)
}

@ -173,6 +174,7 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
	for _, value := range *topologyPools {
		if value.PoolName == poolName {
			topologyPool = append(topologyPool, value)

			break
		}
	}

@ -230,6 +232,7 @@ func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi
		for _, segment := range topologyPool.DomainSegments {
			if domainValue, ok := domainMap[segment.DomainLabel]; !ok || domainValue != segment.DomainValue {
				mismatch = true

				break
			}
		}

@ -235,6 +235,7 @@ func TestFindPoolAndTopology(t *testing.T) {
				topologyPrefix+"/"+label1+l1Value1, topologyPrefix+"/"+label2+l2Value1,
				poolName, topoSegment)
		}

		return nil
	}
	// Test nil values

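The topology hunks narrow a pool list to the node's domain: a pool matches only if every one of its domain segments agrees with the node's label map. A trimmed-down sketch of that all-or-nothing check with illustrative types:

package main

import "fmt"

// segment and constrainedPool mimic the TopologyConstrainedPool
// shapes used above, reduced for illustration.
type segment struct {
	label, value string
}

type constrainedPool struct {
	name     string
	segments []segment
}

// matches reports whether every domain segment of the pool is
// satisfied by the node's topology map.
func matches(p constrainedPool, domainMap map[string]string) bool {
	for _, seg := range p.segments {
		if v, ok := domainMap[seg.label]; !ok || v != seg.value {
			return false
		}
	}

	return true
}

func main() {
	p := constrainedPool{name: "replicapool", segments: []segment{{"zone", "east"}}}
	fmt.Println(matches(p, map[string]string{"zone": "east"})) // true
	fmt.Println(matches(p, map[string]string{"zone": "west"})) // false
}
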
@ -52,6 +52,7 @@ func RoundOffBytes(bytes int64) int64 {
		num = int64(math.Ceil(floatBytes / helpers.GiB))
		num *= helpers.GiB
	}

	return num
}

@ -131,10 +132,12 @@ func ValidateDriverName(driverName string) error {
	for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {
		if err == nil {
			err = errors.New(msg)

			continue
		}
		err = fmt.Errorf("%s: %w", msg, err)
	}

	return err
}

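ValidateDriverName accumulates every validation message into a single wrapped error instead of stopping at the first. A sketch of that wrap-as-you-go pattern on its own:

package main

import (
	"errors"
	"fmt"
)

// collect seeds the error with the first message and wraps it with
// each later one, so no validation failure is lost.
func collect(msgs []string) error {
	var err error
	for _, msg := range msgs {
		if err == nil {
			err = errors.New(msg)

			continue
		}
		err = fmt.Errorf("%s: %w", msg, err)
	}

	return err
}

func main() {
	fmt.Println(collect([]string{"too long", "contains '_'"}))
	// prints: contains '_': too long
}
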
@ -145,6 +148,7 @@ func GetKernelVersion() (string, error) {
	if err := unix.Uname(&utsname); err != nil {
		return "", err
	}

	return strings.TrimRight(string(utsname.Release[:]), "\x00"), nil
}

@ -213,6 +217,7 @@ func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool
	version, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)
	if err != nil {
		ErrorLogMsg("%v", err)

		return false
	}

@ -238,6 +243,7 @@ func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool
		}
	}
	ErrorLogMsg("kernel %s does not support required features", release)

	return false
}

@ -282,6 +288,7 @@ func checkDirExists(p string) bool {
	if _, err := os.Stat(p); os.IsNotExist(err) {
		return false
	}

	return true
}

@ -299,6 +306,7 @@ func IsMountPoint(p string) (bool, error) {
// Mount mounts the source to target path.
func Mount(source, target, fstype string, options []string) error {
	dummyMount := mount.New("")

	return dummyMount.Mount(source, target, fstype, options)
}

@ -354,5 +362,6 @@ func getKeys(m map[string]interface{}) []string {
func CallStack() string {
	stack := make([]byte, 2048)
	_ = runtime.Stack(stack, false)

	return string(stack)
}

@ -32,6 +32,7 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
			"staging path %s does not exist on node",
			req.GetStagingTargetPath())
	}

	return nil
}

@ -93,5 +94,6 @@ func CheckReadOnlyManyIsSupported(req *csi.CreateVolumeRequest) error {
			}
		}
	}

	return nil
}

@ -105,6 +105,7 @@ func setConfigString(option *string, config map[string]interface{}, key string)
	}

	*option = s

	return nil
}

@ -389,6 +390,7 @@ func detectAuthMountPath(path string) (string, error) {
	for _, part := range parts {
		if part == "auth" {
			match = true

			continue
		}
		if part == "login" {

@ -134,6 +134,7 @@ func transformConfig(svMap map[string]interface{}) (map[string]interface{}, erro
	if err != nil {
		return nil, fmt.Errorf("failed to Unmarshal the CSI vault configuration: %w", err)
	}

	return jsonMap, nil
}

@ -577,5 +578,6 @@ func fetchTenantConfig(config map[string]interface{}, tenant string) (map[string
	if !ok {
		return nil, false
	}

	return tenantConfig, true
}

@ -138,6 +138,7 @@ func TestStdVaultToCSIConfig(t *testing.T) {
	err := json.Unmarshal([]byte(vaultConfigMap), sv)
	if err != nil {
		t.Errorf("unexpected error: %s", err)

		return
	}