Changes to e2e to accommodate client-go changes and RunKubectlInput
With client-go v1.18.0, signatures of methods in generated clientsets, dynamic, metadata, and scale clients have been modified to accept context.Context as a first argument. Signatures of Create, Update, and Patch methods have been updated to accept CreateOptions, UpdateOptions, and PatchOptions respectively. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. framework.RunKubectlInput now accepts the namespace as its first parameter, which is also accommodated in this PR.

Signed-off-by: Humble Chirammal hchiramm@redhat.com
parent 60a394b397, commit 4c96ad3c85
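For orientation, here is a minimal sketch of the calling pattern the client-go migration implies. It is illustrative only: the package, function, and variable names below are placeholders and are not code from this commit.

// Illustrative only: how a typed clientset call changes with client-go v1.18.
// "c", "ns", and "name" stand in for whatever the e2e helpers already have in scope.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func getThenDeletePod(c kubernetes.Interface, ns, name string) error {
	// Old: c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
	// New: every verb takes a context.Context as its first argument.
	if _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
		return err
	}

	// Old: Delete(name, &metav1.DeleteOptions{}) -- options passed by reference.
	// New: DeleteOptions is passed by value; Create/Update/Patch similarly take
	// CreateOptions/UpdateOptions/PatchOptions explicitly.
	return c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

The same pattern is applied mechanically throughout the hunks below, together with moving the namespace to the first argument of framework.RunKubectl and framework.RunKubectlInput.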
@@ -4,7 +4,6 @@ import (
     "fmt"

     . "github.com/onsi/ginkgo" // nolint

     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -30,7 +29,7 @@ func deployCephfsPlugin() {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, "--ignore-not-found=true", ns, "delete", "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
     if err != nil {
         e2elog.Logf("failed to delete provisioner rbac %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
     }
@@ -39,7 +38,7 @@ func deployCephfsPlugin() {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, "delete", "--ignore-not-found=true", ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

     if err != nil {
         e2elog.Logf("failed to delete nodeplugin rbac %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
@@ -57,7 +56,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisioner, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs provisioner %v", action, err)
     }
@@ -66,7 +65,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs provisioner rbac %v", action, err)
     }
@@ -75,7 +74,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerPSP, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs provisioner psp %v", action, err)
     }
@@ -84,7 +83,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePlugin, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs nodeplugin %v", action, err)
     }
@@ -93,7 +92,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs nodeplugin rbac %v", action, err)
     }
@@ -102,7 +101,7 @@ func createORDeleteCephfsResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginPSP, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s cephfs nodeplugin psp %v", action, err)
     }
@@ -1,6 +1,7 @@
 package e2e

 import (
+    "context"
     "strings"

     . "github.com/onsi/gomega" // nolint
@@ -21,7 +22,7 @@ var (
 func deployVault(c kubernetes.Interface, deployTimeout int) {
     // hack to make helm E2E pass as helm charts creates this configmap as part
     // of cephcsi deployment
-    _, err := framework.RunKubectl("delete", "cm", "ceph-csi-encryption-kms-config", "--namespace", cephCSINamespace, "--ignore-not-found=true")
+    _, err := framework.RunKubectl(cephCSINamespace, "delete", "cm", "ceph-csi-encryption-kms-config", "--namespace", cephCSINamespace, "--ignore-not-found=true")
     Expect(err).Should(BeNil())

     createORDeleteVault("create")
@@ -29,7 +30,7 @@ func deployVault(c kubernetes.Interface, deployTimeout int) {
         LabelSelector: "app=vault",
     }

-    pods, err := c.CoreV1().Pods(cephCSINamespace).List(opt)
+    pods, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
     Expect(err).Should(BeNil())
     Expect(len(pods.Items)).Should(Equal(1))
     name := pods.Items[0].Name
@@ -50,7 +51,7 @@ func createORDeleteVault(action string) {
     data = strings.ReplaceAll(data, "vault.default", "vault."+cephCSINamespace)

     data = strings.ReplaceAll(data, "value: default", "value: "+cephCSINamespace)
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s vault statefulset %v", action, err)
     }
@@ -59,7 +60,7 @@ func createORDeleteVault(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", vaultExamplePath+vaultRBACPath, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s vault statefulset %v", action, err)
     }
@@ -69,7 +70,7 @@ func createORDeleteVault(action string) {
         e2elog.Logf("failed to read content from %s %v", vaultExamplePath+vaultConfigPath, err)
     }
     data = strings.ReplaceAll(data, "default", cephCSINamespace)
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s vault config map %v", action, err)
     }
@@ -78,7 +79,7 @@ func createORDeleteVault(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", vaultExamplePath+vaultPSPPath, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s vault psp %v", action, err)
     }
@@ -13,6 +13,7 @@ limitations under the License.
 package e2e

 import (
+    "context"
     "fmt"
     "strconv"
     "strings"
@@ -28,7 +29,7 @@ func logsCSIPods(label string, c clientset.Interface) {
     opt := metav1.ListOptions{
         LabelSelector: label,
     }
-    podList, err := c.CoreV1().Pods(cephCSINamespace).List(opt)
+    podList, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
     if err != nil {
         e2elog.Logf("failed to list pods with selector %s %v", label, err)
         return
@@ -62,7 +63,7 @@ func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName
         Name(podName).SubResource("log").
         Param("container", containerName).
         Param("previous", strconv.FormatBool(true)).
-        Do().
+        Do(context.TODO()).
         Raw()
     if err != nil {
         return "", err
e2e/rbd.go | 17
@@ -5,7 +5,6 @@ import (
     "strings"

     . "github.com/onsi/ginkgo" // nolint

     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -31,7 +30,7 @@ func deployRBDPlugin() {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, "--ignore-not-found=true", ns, "delete", "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
     if err != nil {
         e2elog.Logf("failed to delete provisioner rbac %s %v", rbdDirPath+rbdProvisionerRBAC, err)
     }
@@ -40,7 +39,7 @@ func deployRBDPlugin() {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, "delete", "--ignore-not-found=true", ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to delete nodeplugin rbac %s %v", rbdDirPath+rbdNodePluginRBAC, err)
     }
@@ -57,7 +56,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisioner, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s rbd provisioner %v", action, err)
     }
@@ -66,7 +65,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s provisioner rbac %v", action, err)
     }
@@ -75,7 +74,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerPSP, err)
     }
-    _, err = framework.RunKubectlInput(data, action, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s provisioner psp %v", action, err)
     }
@@ -84,7 +83,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePlugin, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s nodeplugin %v", action, err)
     }
@@ -93,7 +92,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginRBAC, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s nodeplugin rbac %v", action, err)
     }
@@ -102,7 +101,7 @@ func createORDeleteRbdResouces(action string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginPSP, err)
     }
-    _, err = framework.RunKubectlInput(data, action, ns, "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
     if err != nil {
         e2elog.Logf("failed to %s nodeplugin psp %v", action, err)
     }
@@ -1,6 +1,7 @@
 package e2e

 import (
+    "context"
     "fmt"
     "strings"
     "time"
@@ -22,21 +23,21 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
     updatedPVC := pvc.DeepCopy()
     var err error

-    updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvcName, metav1.GetOptions{})
+    updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
     if err != nil {
         return fmt.Errorf("error fetching pvc %q with %v", pvcName, err)
     }
     timeout := time.Duration(t) * time.Minute

     updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
-    _, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Update(updatedPVC)
+    _, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Update(context.TODO(), updatedPVC, metav1.UpdateOptions{})
     Expect(err).Should(BeNil())

     start := time.Now()
     e2elog.Logf("Waiting up to %v to be in Resized state", pvc)
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", updatedPVC.Name, int(time.Since(start).Seconds()))
-        updatedPVC, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Get(pvcName, metav1.GetOptions{})
+        updatedPVC, err = c.CoreV1().PersistentVolumeClaims(updatedPVC.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
         if err != nil {
             e2elog.Logf("Error getting pvc in namespace: '%s': %v", updatedPVC.Namespace, err)
             if testutils.IsRetryableAPIError(err) {
@@ -92,7 +93,7 @@ func resizePVCAndValidateSize(pvcPath, appPath string, f *framework.Framework) e
         LabelSelector: "app=resize-pvc",
     }

-    pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+    pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
     if err != nil {
         return err
     }
@@ -1,6 +1,7 @@
 package e2e

 import (
+    "context"
     "fmt"
     "strings"

@@ -115,14 +116,14 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e

     pv := getStaticPV(pvName, rbdImageName, size, "csi-rbd-secret", cephCSINamespace, sc, "rbd.csi.ceph.com", isBlock, opt)

-    _, err := c.CoreV1().PersistentVolumes().Create(pv)
+    _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
     if err != nil {
         return fmt.Errorf("PV Create API error: %v", err)
     }

     pvc := getStaticPVC(pvcName, pvName, size, ns, sc, isBlock)

-    _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
+    _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
     if err != nil {
         return fmt.Errorf("PVC Create API error: %v", err)
     }
@@ -144,12 +145,12 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
         return err
     }

-    err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, &metav1.DeleteOptions{})
+    err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
     if err != nil {
         return err
     }

-    err = c.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
+    err = c.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
     if err != nil {
         return err
     }
e2e/utils.go | 73
@@ -1,6 +1,7 @@
 package e2e

 import (
+    "context"
     "encoding/base64"
     "encoding/json"
     "fmt"
@@ -74,13 +75,13 @@ func createNamespace(c clientset.Interface, name string) error {
             Name: name,
         },
     }
-    _, err := c.CoreV1().Namespaces().Create(ns)
+    _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
     if err != nil && !apierrs.IsAlreadyExists(err) {
         return err
     }

     return wait.PollImmediate(poll, timeout, func() (bool, error) {
-        _, err := c.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
+        _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             e2elog.Logf("Error getting namespace: '%s': %v", name, err)
             if apierrs.IsNotFound(err) {
@@ -97,12 +98,12 @@ func createNamespace(c clientset.Interface, name string) error {

 func deleteNamespace(c clientset.Interface, name string) error {
     timeout := time.Duration(deployTimeout) * time.Minute
-    err := c.CoreV1().Namespaces().Delete(name, nil)
+    err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
     if err != nil && !apierrs.IsNotFound(err) {
         Fail(err.Error())
     }
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
-        _, err = c.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
+        _, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             if apierrs.IsNotFound(err) {
                 return true, nil
@@ -131,7 +132,7 @@ func waitForDaemonSets(name, ns string, c clientset.Interface, t int) error {
     e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)

     return wait.PollImmediate(poll, timeout, func() (bool, error) {
-        ds, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
+        ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
             if strings.Contains(err.Error(), "not found") {
@@ -163,7 +164,7 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er
     )
     timeout := time.Duration(t) * time.Minute
     err = wait.PollImmediate(poll, timeout, func() (bool, error) {
-        deployment, err = c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
+        deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -191,7 +192,7 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er

 func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) framework.ExecOptions {
     cmd := []string{"/bin/sh", "-c", c}
-    podList, err := f.PodClientNS(ns).List(*opt)
+    podList, err := f.PodClientNS(ns).List(context.TODO(), *opt)
     framework.ExpectNoError(err)
     Expect(podList.Items).NotTo(BeNil())
     Expect(err).Should(BeNil())
@@ -231,7 +232,7 @@ func getMons(ns string, c kubernetes.Interface) []string {
     opt := metav1.ListOptions{
         LabelSelector: "app=rook-ceph-mon",
     }
-    svcList, err := c.CoreV1().Services(ns).List(opt)
+    svcList, err := c.CoreV1().Services(ns).List(context.TODO(), opt)
     Expect(err).Should(BeNil())
     services := make([]string, 0)
     for i := range svcList.Items {
@@ -289,7 +290,7 @@ func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, en
     fsID = strings.Trim(fsID, "\n")
     sc.Namespace = cephCSINamespace
     sc.Parameters["clusterID"] = fsID
-    _, err := c.StorageV1().StorageClasses().Create(&sc)
+    _, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
     Expect(err).Should(BeNil())
 }

@@ -319,7 +320,7 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, param
         sc.Parameters[k] = v
     }
     sc.Namespace = cephCSINamespace
-    _, err := c.StorageV1().StorageClasses().Create(&sc)
+    _, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
     Expect(err).Should(BeNil())
 }

@@ -353,7 +354,7 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, param

 func deleteConfigMap(pluginPath string) {
     path := pluginPath + configMap
-    _, err := framework.RunKubectl("delete", "-f", path, ns)
+    _, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns)
     if err != nil {
         e2elog.Logf("failed to delete configmap %v", err)
     }
@@ -388,14 +389,14 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
     cm.Namespace = cephCSINamespace
     // if the configmap is present update it,during cephcsi helm charts
     // deployment empty configmap gets created we need to override it
-    _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(cm.Name, metav1.GetOptions{})
+    _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{})

     if err == nil {
-        _, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(&cm)
+        _, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
         Expect(updateErr).Should(BeNil())
     }
     if apierrs.IsNotFound(err) {
-        _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(&cm)
+        _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
     }

     Expect(err).Should(BeNil())
@@ -426,7 +427,7 @@ func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) {
     delete(sc.StringData, "userID")
     delete(sc.StringData, "userKey")
     sc.Namespace = cephCSINamespace
-    _, err := c.CoreV1().Secrets(cephCSINamespace).Create(&sc)
+    _, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
     Expect(err).Should(BeNil())
 }

@@ -441,7 +442,7 @@ func createRBDSecret(c kubernetes.Interface, f *framework.Framework) {
     sc.StringData["userID"] = "admin"
     sc.StringData["userKey"] = adminKey
     sc.Namespace = cephCSINamespace
-    _, err := c.CoreV1().Secrets(cephCSINamespace).Create(&sc)
+    _, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
     Expect(err).Should(BeNil())

     err = updateSecretForEncryption(c)
@@ -452,26 +453,26 @@ func createRBDSecret(c kubernetes.Interface, f *framework.Framework) {
 // include the encyption key
 // TODO in cephcsi we need to create own users in ceph cluster and use it for E2E
 func updateSecretForEncryption(c kubernetes.Interface) error {
-    secrets, err := c.CoreV1().Secrets(rookNamespace).Get(rbdProvisionerSecretName, metav1.GetOptions{})
+    secrets, err := c.CoreV1().Secrets(rookNamespace).Get(context.TODO(), rbdProvisionerSecretName, metav1.GetOptions{})
     if err != nil {
         return err
     }

     secrets.Data["encryptionPassphrase"] = []byte("test_passphrase")

-    _, err = c.CoreV1().Secrets(rookNamespace).Update(secrets)
+    _, err = c.CoreV1().Secrets(rookNamespace).Update(context.TODO(), secrets, metav1.UpdateOptions{})
     if err != nil {
         return err
     }

-    secrets, err = c.CoreV1().Secrets(rookNamespace).Get(rbdNodePluginSecretName, metav1.GetOptions{})
+    secrets, err = c.CoreV1().Secrets(rookNamespace).Get(context.TODO(), rbdNodePluginSecretName, metav1.GetOptions{})
     if err != nil {
         return err
     }

     secrets.Data["encryptionPassphrase"] = []byte("test_passphrase")

-    _, err = c.CoreV1().Secrets(rookNamespace).Update(secrets)
+    _, err = c.CoreV1().Secrets(rookNamespace).Update(context.TODO(), secrets, metav1.UpdateOptions{})
     if err != nil {
         return err
     }
@@ -483,7 +484,7 @@ func deleteResource(scPath string) {
     if err != nil {
         e2elog.Logf("failed to read content from %s %v", scPath, err)
     }
-    _, err = framework.RunKubectlInput(data, ns, "delete", "-f", "-")
+    _, err = framework.RunKubectlInput(cephCSINamespace, data, ns, "delete", "-f", "-")
     if err != nil {
         e2elog.Logf("failed to delete %s %v", scPath, err)
     }
@@ -503,7 +504,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
     timeout := time.Duration(t) * time.Minute
     pv := &v1.PersistentVolume{}
     var err error
-    _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
+    _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
     Expect(err).Should(BeNil())
     name := pvc.Name
     start := time.Now()
@@ -511,7 +512,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai

     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
-        pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(name, metav1.GetOptions{})
+        pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
             if testutils.IsRetryableAPIError(err) {
@@ -527,7 +528,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
             return false, nil
         }

-        pv, err = c.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+        pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -549,16 +550,16 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
     var err error
     e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)

-    pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
+    pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
     if err != nil {
         return err
     }
-    pv, err := c.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+    pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
     if err != nil {
         return err
     }

-    err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(name, &metav1.DeleteOptions{})
+    err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{})
     if err != nil {
         return fmt.Errorf("delete of PVC %v failed: %v", name, err)
     }
@@ -566,7 +567,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         // Check that the PVC is really deleted.
         e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
-        pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(name, metav1.GetOptions{})
+        pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
         if err == nil {
             return false, nil
         }
@@ -575,7 +576,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
         }

         // Examine the pv.ClaimRef and UID. Expect nil values.
-        _, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
+        _, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
         if err == nil {
             return false, nil
         }
@@ -598,7 +599,7 @@ func loadApp(path string) (*v1.Pod, error) {
 }

 func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
-    _, err := c.CoreV1().Pods(app.Namespace).Create(app)
+    _, err := c.CoreV1().Pods(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
     if err != nil {
         return err
     }
@@ -610,7 +611,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) er
     start := time.Now()
     e2elog.Logf("Waiting up to %v to be in Running state", name)
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
-        pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
+        pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -627,14 +628,14 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) er

 func deletePod(name, ns string, c kubernetes.Interface, t int) error {
     timeout := time.Duration(t) * time.Minute
-    err := c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{})
+    err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
     if err != nil {
         return err
     }
     start := time.Now()
     e2elog.Logf("Waiting for pod %v to be deleted", name)
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
-        _, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
+        _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})

         if apierrs.IsNotFound(err) {
             return true, nil
@@ -726,12 +727,12 @@ func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) {

 func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (string, string, error) {
     c := f.ClientSet.CoreV1()
-    pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(pvcName, metav1.GetOptions{})
+    pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
     if err != nil {
         return "", "", err
     }

-    pv, err := c.PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+    pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
     if err != nil {
         return "", "", err
     }
@@ -827,7 +828,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor
 }

 func deletePodWithLabel(label, ns string, skipNotFound bool) error {
-    _, err := framework.RunKubectl("delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
+    _, err := framework.RunKubectl(cephCSINamespace, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
     if err != nil {
         e2elog.Logf("failed to delete pod %v", err)
     }