From 3bc6771df8d1612f296ed79cf3f68c6535d1cf28 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Mon, 24 Jun 2019 13:28:39 +0530
Subject: [PATCH] Migrate from framework.Logf to e2elog.Logf and use the new
 nsenter interface

Signed-off-by: Humble Chirammal
---
 e2e/cephfs.go         |  3 ++-
 e2e/deploy-rook.go    |  9 +++++----
 e2e/rbd.go            | 13 +++++++------
 e2e/utils.go          | 45 ++++++++++++++++++++++---------------------
 pkg/rbd/nodeserver.go |  4 ++--
 pkg/rbd/rbd.go        |  3 ++-
 6 files changed, 41 insertions(+), 36 deletions(-)

diff --git a/e2e/cephfs.go b/e2e/cephfs.go
index 4ab6a5f61..3d7fcb95c 100644
--- a/e2e/cephfs.go
+++ b/e2e/cephfs.go
@@ -7,6 +7,7 @@ import (
 	. "github.com/onsi/ginkgo" // nolint
 
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 var (
@@ -45,7 +46,7 @@ var _ = Describe("cephfs", func() {
 			for _, file := range cephfsFiles {
 				res, err := framework.RunKubectl("delete", "-f", cephfsDirPath+file.Name())
 				if err != nil {
-					framework.Logf("failed to delete resource in %s with err %v", res, err)
+					e2elog.Logf("failed to delete resource in %s with err %v", res, err)
 				}
 			}
 			deleteResource(cephfsExamplePath + "secret.yaml")
diff --git a/e2e/deploy-rook.go b/e2e/deploy-rook.go
index 496f052af..02a69968f 100644
--- a/e2e/deploy-rook.go
+++ b/e2e/deploy-rook.go
@@ -8,6 +8,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 var (
@@ -21,7 +22,7 @@ func formRookURL(version string) {
 }
 
 func getK8sClient() kubernetes.Interface {
-	framework.Logf("Creating a kubernetes client")
+	e2elog.Logf("Creating a kubernetes client")
 	client, err := framework.LoadClientset()
 	Expect(err).Should(BeNil())
 	return client
@@ -51,7 +52,7 @@ func deleteFileSystem() {
 	commonPath := fmt.Sprintf("%s/%s", rookURL, "filesystem-test.yaml")
 	_, err := framework.RunKubectl("delete", "-f", commonPath)
 	if err != nil {
-		framework.Logf("failed to delete file-system %v", err)
+		e2elog.Logf("failed to delete file-system %v", err)
 	}
 }
 
@@ -59,7 +60,7 @@ func deleteRBDPool() {
 	commonPath := fmt.Sprintf("%s/%s", rookURL, "pool-test.yaml")
 	_, err := framework.RunKubectl("delete", "-f", commonPath)
 	if err != nil {
-		framework.Logf("failed to delete pool %v", err)
+		e2elog.Logf("failed to delete pool %v", err)
 	}
 }
 
@@ -118,6 +119,6 @@ func tearDownRook() {
 	commonPath := fmt.Sprintf("%s/%s", rookURL, "common.yaml")
 	_, err := framework.RunKubectl("delete", "-f", commonPath)
 	if err != nil {
-		framework.Logf("failed to delete rook common %v", err)
+		e2elog.Logf("failed to delete rook common %v", err)
 	}
 }
diff --git a/e2e/rbd.go b/e2e/rbd.go
index b2cdc523f..8882b2712 100644
--- a/e2e/rbd.go
+++ b/e2e/rbd.go
@@ -7,6 +7,7 @@ import (
 	. "github.com/onsi/ginkgo" // nolint
 
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 var (
@@ -48,7 +49,7 @@ var _ = Describe("RBD", func() {
 			for _, file := range rbdFiles {
 				res, err := framework.RunKubectl("delete", "-f", rbdDirPath+file.Name())
 				if err != nil {
-					framework.Logf("failed to delete resource in %s with err %v", res, err)
+					e2elog.Logf("failed to delete resource in %s with err %v", res, err)
 				}
 			}
 			deleteRBDPool()
@@ -96,7 +97,7 @@ var _ = Describe("RBD", func() {
 			}
 			pvc.Namespace = f.UniqueName
 
-			framework.Logf("The PVC template %+v", pvc)
+			e2elog.Logf("The PVC template %+v", pvc)
 			err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 			if err != nil {
 				Fail(err.Error())
@@ -104,7 +105,7 @@ var _ = Describe("RBD", func() {
 			// validate created backend rbd images
 			images := listRBDImages(f)
 			if len(images) != 1 {
-				framework.Logf("backend image count %d expected image count %d", len(images), 1)
+				e2elog.Logf("backend image count %d expected image count %d", len(images), 1)
 				Fail("validate backend image failed")
 			}
 			snap := getSnapshot(snapshotPath)
@@ -121,7 +122,7 @@ var _ = Describe("RBD", func() {
 				Fail(err.Error())
 			}
 			if len(snapList) != 1 {
-				framework.Logf("backend snapshot not matching kube snap count,snap count = % kube snap count %d", len(snapList), 1)
+				e2elog.Logf("backend snapshot not matching kube snap count, snap count = %d, kube snap count %d", len(snapList), 1)
 				Fail("validate backend snapshot failed")
 			}
 
@@ -167,7 +168,7 @@ var _ = Describe("RBD", func() {
 			// validate created backend rbd images
 			images := listRBDImages(f)
 			if len(images) != totalCount {
-				framework.Logf("backend image creation not matching pvc count, image count = % pvc count %d", len(images), totalCount)
+				e2elog.Logf("backend image creation not matching pvc count, image count = %d, pvc count %d", len(images), totalCount)
 				Fail("validate multiple pvc failed")
 			}
 
@@ -184,7 +185,7 @@ var _ = Describe("RBD", func() {
 			// validate created backend rbd images
 			images = listRBDImages(f)
 			if len(images) > 0 {
-				framework.Logf("left out rbd backend images count %d", len(images))
+				e2elog.Logf("left out rbd backend images count %d", len(images))
 				Fail("validate multiple pvc failed")
 			}
 		})
diff --git a/e2e/utils.go b/e2e/utils.go
index bb77ad248..b13ea5544 100644
--- a/e2e/utils.go
+++ b/e2e/utils.go
@@ -24,6 +24,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/client/conditions"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
@@ -47,13 +48,13 @@ type snapInfo struct {
 func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
 	timeout := time.Duration(t) * time.Minute
 	start := time.Now()
-	framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
+	e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
 		timeout, ns)
 
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		ds, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
-			framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
+			e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
 			if strings.Contains(err.Error(), "not found") {
 				return false, nil
 			}
@@ -64,7 +65,7 @@ func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
 		}
 		dNum := ds.Status.DesiredNumberScheduled
 		ready := ds.Status.NumberReady
-		framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
+		e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
 		if ready != dNum {
 			return false, nil
 		}
@@ -97,7 +98,7 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er
 		}
 
 		reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
-		framework.Logf(reason)
+		e2elog.Logf(reason)
 
 		return false, nil
 	})
@@ -325,13 +326,13 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
 	Expect(err).Should(BeNil())
 	name := pvc.Name
 	start := time.Now()
-	framework.Logf("Waiting up to %v to be in Bound state", pvc)
+	e2elog.Logf("Waiting up to %v to be in Bound state", pvc)
 
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		framework.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
+		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
-			framework.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
+			e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
 			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
@@ -363,7 +364,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
 	nameSpace := pvc.Namespace
 	name := pvc.Name
 	var err error
-	framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
+	e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
 
 	pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
 	if err != nil {
@@ -381,7 +382,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
 	start := time.Now()
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		// Check that the PVC is really deleted.
-		framework.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
+		e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
 		pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
@@ -440,7 +441,7 @@ func getPodName(ns string, c kubernetes.Interface, opt *metav1.ListOptions) stri
 func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
 	timeout := time.Duration(t) * time.Minute
 	start := time.Now()
-	framework.Logf("Waiting up to %v to be in Running state", name)
+	e2elog.Logf("Waiting up to %v to be in Running state", name)
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
@@ -452,7 +453,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) er
 		case v1.PodFailed, v1.PodSucceeded:
 			return false, conditions.ErrPodCompleted
 		}
-		framework.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
+		e2elog.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
 		return false, nil
 	})
 }
@@ -464,14 +465,14 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
 		return err
 	}
 	start := time.Now()
-	framework.Logf("Waiting for pod %v to be deleted", name)
+	e2elog.Logf("Waiting for pod %v to be deleted", name)
 
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		_, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
 		if apierrs.IsNotFound(err) {
 			return true, nil
 		}
-		framework.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+		e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
 		if err != nil {
 			return false, err
 		}
@@ -503,7 +504,7 @@ func checkCephPods(ns string, c kubernetes.Interface, count, t int, opt *metav1.
 			return false, err
 		}
 
-		framework.Logf("pod count is %d expected count %d (%d seconds elapsed)", len(podList.Items), count, int(time.Since(start).Seconds()))
+		e2elog.Logf("pod count is %d expected count %d (%d seconds elapsed)", len(podList.Items), count, int(time.Since(start).Seconds()))
 
 		if len(podList.Items) >= count {
 			return true, nil
@@ -555,7 +556,7 @@ func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) {
 		Fail(err.Error())
 	}
 	pvc.Namespace = f.UniqueName
-	framework.Logf("The PVC template %+v", pvc)
+	e2elog.Logf("The PVC template %+v", pvc)
 
 	app, err := loadApp(appPath)
 	if err != nil {
@@ -581,7 +582,7 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) {
 	}
 	pvc.Namespace = f.UniqueName
 	pvc.Name = f.UniqueName
-	framework.Logf("The PVC template %+v", pvc)
+	e2elog.Logf("The PVC template %+v", pvc)
 	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 	if err != nil {
 		Fail(err.Error())
@@ -660,18 +661,18 @@ func createSnapshot(snap *v1alpha1.VolumeSnapshot, t int) error {
 	if err != nil {
 		return err
 	}
-	framework.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)
+	e2elog.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)
 
 	timeout := time.Duration(t) * time.Minute
 	name := snap.Name
 	start := time.Now()
-	framework.Logf("Waiting up to %v to be in Ready state", snap)
+	e2elog.Logf("Waiting up to %v to be in Ready state", snap)
 
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		framework.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
+		e2elog.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
 		snaps, err := sclient.VolumeSnapshots(snap.Namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
-			framework.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
+			e2elog.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
 			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
@@ -700,10 +701,10 @@ func deleteSnapshot(snap *v1alpha1.VolumeSnapshot, t int) error {
 	timeout := time.Duration(t) * time.Minute
 	name := snap.Name
 	start := time.Now()
-	framework.Logf("Waiting up to %v to be deleted", snap)
+	e2elog.Logf("Waiting up to %v to be deleted", snap)
 
 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		framework.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+		e2elog.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
 		_, err := sclient.VolumeSnapshots(snap.Namespace).Get(name, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go
index 44a5a84b4..e936bbc9a 100644
--- a/pkg/rbd/nodeserver.go
+++ b/pkg/rbd/nodeserver.go
@@ -160,7 +160,7 @@ func (ns *NodeServer) mountVolume(req *csi.NodePublishVolumeRequest, devicePath
 
 func (ns *NodeServer) createTargetPath(targetPath string, isBlock bool) (bool, error) {
 	// Check if that target path exists properly
-	notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
+	notMnt, err := mount.IsNotMountPoint(ns.mounter, targetPath)
 	if err != nil {
 		if os.IsNotExist(err) {
 			if isBlock {
@@ -209,7 +209,7 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		}
 	}()
 
-	notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
+	notMnt, err := mount.IsNotMountPoint(ns.mounter, targetPath)
 	if err != nil {
 		if os.IsNotExist(err) {
 			// targetPath has already been deleted
diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go
index b9ff3d1df..73c898d8c 100644
--- a/pkg/rbd/rbd.go
+++ b/pkg/rbd/rbd.go
@@ -23,6 +23,7 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/util/mount"
+	nsutil "k8s.io/kubernetes/pkg/volume/util/nsenter"
 	"k8s.io/utils/exec"
 	"k8s.io/utils/nsenter"
 )
@@ -89,7 +90,7 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err
 		if err != nil {
 			return nil, err
 		}
-		mounter = mount.NewNsenterMounter("", ne)
+		mounter = nsutil.NewMounter("", ne)
 	}
 	return &NodeServer{
 		DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
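Notes (not part of the commit):

The logging half of this change is mechanical: Logf moved out of the
k8s.io/kubernetes/test/e2e/framework package into
k8s.io/kubernetes/test/e2e/framework/log, so each call site swaps only the
import and the receiver while format strings and arguments stay the same.
A minimal sketch of the before/after pattern, assuming the two framework
packages above (the helper name logDeleteError is illustrative, not from
the patch):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logDeleteError mirrors the cleanup pattern used throughout these tests:
// run kubectl delete and log, rather than fail, when deletion errors.
func logDeleteError(path string) {
	res, err := framework.RunKubectl("delete", "-f", path)
	if err != nil {
		// was: framework.Logf("failed to delete resource in %s with err %v", res, err)
		e2elog.Logf("failed to delete resource in %s with err %v", res, err)
	}
}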
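The mounter half tracks an upstream move: NewNsenterMounter was dropped from
k8s.io/kubernetes/pkg/util/mount and its replacement, NewMounter, lives in
k8s.io/kubernetes/pkg/volume/util/nsenter (imported as nsutil), while
IsNotMountPoint is now a package-level helper taking the mounter as its
first argument. A sketch of the wiring under those packages (the newMounter
helper is illustrative; the NewNsenter call follows k8s.io/utils/nsenter,
which the diff already imports):

package rbd

import (
	"k8s.io/kubernetes/pkg/util/mount"
	nsutil "k8s.io/kubernetes/pkg/volume/util/nsenter"
	"k8s.io/utils/exec"
	"k8s.io/utils/nsenter"
)

// newMounter returns a plain mounter, or an nsenter-backed one when the
// driver itself runs inside a container and must mount in the host
// namespaces.
func newMounter(containerized bool) (mount.Interface, error) {
	if !containerized {
		return mount.New(""), nil
	}
	ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New())
	if err != nil {
		return nil, err
	}
	// Replaces the removed mount.NewNsenterMounter("", ne).
	return nsutil.NewMounter("", ne), nil
}

// With the interface value in hand, mount-point checks use the new
// package-level form seen in the nodeserver.go hunks:
//	notMnt, err := mount.IsNotMountPoint(mounter, targetPath)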