mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00
rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
committed by mergify[bot] (parent e9e33fb851, commit 9c8de9471e)
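Context for the hunks below: Kubernetes 1.26 removed the test/e2e/framework/log package and moved several framework helpers into dedicated sub-packages, so this commit mechanically rewrites the callers. A minimal sketch of the migration pattern, not part of the diff (podName and ns are illustrative parameters):

package e2e

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func migrationSketch(f *framework.Framework, podName, ns string) {
    // e2elog.Logf / e2elog.Failf  ->  framework.Logf / framework.Failf
    framework.Logf("logging helpers moved into the framework package")

    // framework.ExecOptions / f.ExecWithOptions  ->  e2epod equivalents;
    // the *framework.Framework receiver is now an explicit first argument.
    opts := e2epod.ExecOptions{
        Command:       []string{"/bin/sh", "-c", "true"},
        PodName:       podName,
        Namespace:     ns,
        CaptureStdout: true,
        CaptureStderr: true,
    }
    stdOut, stdErr, err := e2epod.ExecWithOptions(f, opts)
    framework.Logf("stdout=%q stderr=%q err=%v", stdOut, stdErr, err)

    // framework.DumpAllNamespaceInfo  ->  e2edebug.DumpAllNamespaceInfo
    e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns)
}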
e2e/cephfs.go (564 lines changed)
@@ -30,7 +30,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 const (
@@ -106,7 +105,7 @@ func createCephfsStorageClass(
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
         if err != nil {
-            e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err)
+            framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
             if isRetryableAPIError(err) {
                 return false, nil
             }
@@ -141,13 +140,13 @@ func createCephfsSecret(f *framework.Framework, secretName, userName, userKey st
 func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error {
     pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
+        framework.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
 
         return fmt.Errorf("failed to get pod: %w", err)
     }
     pvc, err := getPersistentVolumeClaim(f.ClientSet, f.UniqueName, pvcName)
     if err != nil {
-        e2elog.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
+        framework.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
 
         return fmt.Errorf("failed to get pvc: %w", err)
     }
@@ -163,7 +162,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
         cephFSContainerName,
         cephCSINamespace)
     if stdErr != "" {
-        e2elog.Logf("StdErr occurred: %s", stdErr)
+        framework.Logf("StdErr occurred: %s", stdErr)
     }
 
     return err
@@ -339,7 +338,7 @@ func getSnapName(snapNamespace, snapName string) (string, error) {
     snapIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
     snapID := snapIDRegex.FindString(*sc.Status.SnapshotHandle)
     snapshotName := fmt.Sprintf("csi-snap-%s", snapID)
-    e2elog.Logf("snapshotName= %s", snapshotName)
+    framework.Logf("snapshotName= %s", snapshotName)
 
     return snapshotName, nil
 }
@@ -464,7 +463,7 @@ func validateFscryptAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *fra
         if !destroyed {
             return fmt.Errorf("passphrased was not destroyed: %s", msg)
         } else if msg != "" {
-            e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
+            framework.Logf("passphrase destroyed, but message returned: %s", msg)
         }
     }
 
@@ -479,17 +478,17 @@ func validateFscryptClone(
 ) {
     pvc, err := loadPVC(pvcPath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
 
     pvc.Namespace = f.UniqueName
     err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to create PVC: %v", err)
+        framework.Failf("failed to create PVC: %v", err)
     }
     app, err := loadApp(appPath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     label := make(map[string]string)
     label[appKey] = appLabel
@@ -501,18 +500,18 @@ func validateFscryptClone(
     }
     wErr := writeDataInPod(app, &opt, f)
     if wErr != nil {
-        e2elog.Failf("failed to write data from application %v", wErr)
+        framework.Failf("failed to write data from application %v", wErr)
     }
 
     pvcClone, err := loadPVC(pvcSmartClonePath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
     pvcClone.Spec.DataSource.Name = pvc.Name
     pvcClone.Namespace = f.UniqueName
     appClone, err := loadApp(appSmartClonePath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     appClone.Namespace = f.UniqueName
     appClone.Labels = map[string]string{
@@ -521,50 +520,50 @@ func validateFscryptClone(
 
     err = createPVCAndApp(f.UniqueName, f, pvcClone, appClone, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to create PVC or application (%s): %v", f.UniqueName, err)
+        framework.Failf("failed to create PVC or application (%s): %v", f.UniqueName, err)
     }
 
     _, csiVolumeHandle, err := getInfoFromPVC(pvcClone.Namespace, pvcClone.Name, f)
     if err != nil {
-        e2elog.Failf("failed to get pvc info: %s", err)
+        framework.Failf("failed to get pvc info: %s", err)
     }
 
     if kms != noKMS && kms.canGetPassphrase() {
         // check new passphrase created
         stdOut, stdErr := kms.getPassphrase(f, csiVolumeHandle)
         if stdOut != "" {
-            e2elog.Logf("successfully read the passphrase from vault: %s", stdOut)
+            framework.Logf("successfully read the passphrase from vault: %s", stdOut)
         }
         if stdErr != "" {
-            e2elog.Failf("failed to read passphrase from vault: %s", stdErr)
+            framework.Failf("failed to read passphrase from vault: %s", stdErr)
         }
     }
 
     // delete parent pvc
     err = deletePVCAndApp("", f, pvc, app)
     if err != nil {
-        e2elog.Failf("failed to delete PVC or application: %v", err)
+        framework.Failf("failed to delete PVC or application: %v", err)
     }
 
     err = deletePVCAndApp(f.UniqueName, f, pvcClone, appClone)
     if err != nil {
-        e2elog.Failf("failed to delete PVC or application (%s): %v", f.UniqueName, err)
+        framework.Failf("failed to delete PVC or application (%s): %v", f.UniqueName, err)
     }
 
     if kms != noKMS && kms.canGetPassphrase() {
         // check passphrase deleted
         stdOut, _ := kms.getPassphrase(f, csiVolumeHandle)
         if stdOut != "" {
-            e2elog.Failf("passphrase found in vault while should be deleted: %s", stdOut)
+            framework.Failf("passphrase found in vault while should be deleted: %s", stdOut)
         }
     }
 
     if kms != noKMS && kms.canVerifyKeyDestroyed() {
         destroyed, msg := kms.verifyKeyDestroyed(f, csiVolumeHandle)
         if !destroyed {
-            e2elog.Failf("passphrased was not destroyed: %s", msg)
+            framework.Failf("passphrased was not destroyed: %s", msg)
         } else if msg != "" {
-            e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
+            framework.Logf("passphrase destroyed, but message returned: %s", msg)
         }
     }
 }
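Note (not part of the diff): the hunks above are a one-for-one rename, e2elog.Logf becomes framework.Logf and e2elog.Failf becomes framework.Failf, with semantics unchanged. Failf still aborts the running spec, which is why the concurrent loops later in this commit log per-item errors with Logf and call Failf only once at the end. A sketch of that pattern (wgErrs is illustrative):

    // Sketch: collect errors instead of aborting on the first one.
    failed := 0
    for i, err := range wgErrs {
        if err != nil {
            // Failf here would abort the spec and hide the remaining errors.
            framework.Logf("request %d failed: %v", i, err)
            failed++
        }
    }
    if failed != 0 {
        framework.Failf("%d errors were logged", failed)
    }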
@@ -23,7 +23,6 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 func validateBiggerCloneFromPVC(f *framework.Framework,
@@ -60,14 +59,14 @@ func validateBiggerCloneFromPVC(f *framework.Framework,
 
     pvcClone, err := loadPVC(pvcClonePath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
     pvcClone.Namespace = f.UniqueName
     pvcClone.Spec.DataSource.Name = pvc.Name
     pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
     appClone, err := loadApp(appClonePath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     appClone.Namespace = f.UniqueName
     appClone.Labels = label
@@ -24,7 +24,7 @@ import (
     . "github.com/onsi/gomega" // nolint
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
 )
 
 var (
@@ -69,7 +69,7 @@ func deleteVault() {
 func createORDeleteVault(action kubectlAction) {
     data, err := replaceNamespaceInTemplate(vaultExamplePath + vaultServicePath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultServicePath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultServicePath, err)
     }
 
     data = strings.ReplaceAll(data, "vault.default", "vault."+cephCSINamespace)
@@ -77,26 +77,26 @@ func createORDeleteVault(action kubectlAction) {
     data = strings.ReplaceAll(data, "value: default", "value: "+cephCSINamespace)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault statefulset %v", action, err)
+        framework.Failf("failed to %s vault statefulset %v", action, err)
     }
 
     data, err = replaceNamespaceInTemplate(vaultExamplePath + vaultRBACPath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultRBACPath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultRBACPath, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault statefulset %v", action, err)
+        framework.Failf("failed to %s vault statefulset %v", action, err)
     }
 
     data, err = replaceNamespaceInTemplate(vaultExamplePath + vaultConfigPath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultConfigPath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultConfigPath, err)
     }
     data = strings.ReplaceAll(data, "default", cephCSINamespace)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault configmap %v", action, err)
+        framework.Failf("failed to %s vault configmap %v", action, err)
     }
 }
@@ -32,7 +32,7 @@ import (
     "k8s.io/client-go/kubernetes"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 // execCommandInPodWithName run command in pod using podName.
@@ -44,7 +44,7 @@ func execCommandInPodWithName(
     nameSpace string,
 ) (string, string, error) {
     cmd := []string{"/bin/sh", "-c", cmdString}
-    podOpt := framework.ExecOptions{
+    podOpt := e2epod.ExecOptions{
         Command:   cmd,
         PodName:   podName,
         Namespace: nameSpace,
@@ -55,7 +55,7 @@ func execCommandInPodWithName(
         PreserveWhitespace: true,
     }
 
-    return f.ExecWithOptions(podOpt)
+    return e2epod.ExecWithOptions(f, podOpt)
 }
 
 // loadAppDeployment loads the deployment app config and return deployment
@@ -92,7 +92,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
         return fmt.Errorf("failed to delete deployment: %w", err)
     }
     start := time.Now()
-    e2elog.Logf("Waiting for deployment %q to be deleted", name)
+    framework.Logf("Waiting for deployment %q to be deleted", name)
 
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -103,7 +103,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
             if apierrs.IsNotFound(err) {
                 return true, nil
             }
-            e2elog.Logf("%q deployment to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+            framework.Logf("%q deployment to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
 
             return false, fmt.Errorf("failed to get deployment: %w", err)
         }
@@ -116,7 +116,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
 func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns string, deployTimeout int) error {
     timeout := time.Duration(deployTimeout) * time.Minute
     start := time.Now()
-    e2elog.Logf("Waiting up to %q to be in Available state", name)
+    framework.Logf("Waiting up to %q to be in Available state", name)
 
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         d, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -127,7 +127,7 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
             if apierrs.IsNotFound(err) {
                 return false, nil
             }
-            e2elog.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+            framework.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
 
             return false, err
         }
@@ -154,7 +154,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
             if apierrs.IsNotFound(err) {
                 return false, nil
             }
-            e2elog.Logf("deployment error: %v", err)
+            framework.Logf("deployment error: %v", err)
 
             return false, err
         }
@@ -166,7 +166,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
         if deployment.Status.Replicas == deployment.Status.ReadyReplicas {
             return true, nil
         }
-        e2elog.Logf(
+        framework.Logf(
             "deployment status: expected replica count %d running replica count %d",
             deployment.Status.Replicas,
             deployment.Status.ReadyReplicas)
@@ -286,7 +286,7 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {
         if err != nil {
             // depending on the Ceph/Rook version, modules are
             // enabled by default
-            e2elog.Logf("enabling module %q failed: %v", module, err)
+            framework.Logf("enabling module %q failed: %v", module, err)
         }
     }
 
@@ -295,7 +295,7 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {
         cmd := fmt.Sprintf("ceph orch set backend %s", rnr.orchBackend)
         _, _, err := execCommandInToolBoxPod(rnr.f, cmd, rookNamespace)
         if err != nil {
-            e2elog.Logf("setting orch backend %q failed: %v", rnr.orchBackend, err)
+            framework.Logf("setting orch backend %q failed: %v", rnr.orchBackend, err)
         }
     }
 
@@ -318,14 +318,14 @@ func waitForDeploymentUpdateScale(
             if isRetryableAPIError(upsErr) {
                 return false, nil
             }
-            e2elog.Logf(
+            framework.Logf(
                 "Deployment UpdateScale %s/%s has not completed yet (%d seconds elapsed)",
                 ns, deploymentName, int(time.Since(start).Seconds()))
 
             return false, fmt.Errorf("error update scale deployment %s/%s: %w", ns, deploymentName, upsErr)
         }
         if scaleResult.Spec.Replicas != scale.Spec.Replicas {
-            e2elog.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
+            framework.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
                 ns, deploymentName, scale.Spec.Replicas, scaleResult.Spec.Replicas)
 
             return false, fmt.Errorf("error scale not matching in deployment %s/%s: %w", ns, deploymentName, upsErr)
@@ -354,7 +354,7 @@ func waitForDeploymentUpdate(
             if isRetryableAPIError(upErr) {
                 return false, nil
             }
-            e2elog.Logf(
+            framework.Logf(
                 "Deployment Update %s/%s has not completed yet (%d seconds elapsed)",
                 deployment.Namespace, deployment.Name, int(time.Since(start).Seconds()))
 
@@ -391,7 +391,7 @@ func waitForContainersArgsUpdate(
     containers []string,
     timeout int,
 ) error {
-    e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
+    framework.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
 
     // wait for the deployment to be available
     err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
@@ -463,14 +463,14 @@ func waitForContainersArgsUpdate(
             if isRetryableAPIError(getErr) {
                 return false, nil
             }
-            e2elog.Logf(
+            framework.Logf(
                 "Deployment Get %s/%s has not completed yet (%d seconds elapsed)",
                 ns, deploymentName, int(time.Since(start).Seconds()))
 
             return false, fmt.Errorf("error getting deployment %s/%s: %w", ns, deploymentName, getErr)
         }
         if deploy.Status.Replicas != count {
-            e2elog.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)
+            framework.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)
 
             return false, fmt.Errorf("error expected deployment %s/%s replicas %d, got %d",
                 ns, deploymentName, count, deploy.Status.Replicas)
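Note (not part of the diff): the two behavioural changes in the helper file above are the type rename framework.ExecOptions to e2epod.ExecOptions and the call change f.ExecWithOptions(podOpt) to e2epod.ExecWithOptions(f, podOpt); the receiver became an explicit argument. A standalone sketch (function and parameter names are illustrative):

package e2e

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execShell runs cmdString inside the named container and returns
// stdout, stderr and any execution error.
func execShell(f *framework.Framework, ns, podName, containerName, cmdString string) (string, string, error) {
    podOpt := e2epod.ExecOptions{
        Command:            []string{"/bin/sh", "-c", cmdString},
        PodName:            podName,
        Namespace:          ns,
        ContainerName:      containerName,
        CaptureStdout:      true,
        CaptureStderr:      true,
        PreserveWhitespace: true,
    }

    // Before Kubernetes 1.26 this was f.ExecWithOptions(podOpt).
    return e2epod.ExecWithOptions(f, podOpt)
}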
e2e/log.go (10 lines changed)
@@ -22,7 +22,7 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
     frameworkPod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
@@ -32,7 +32,7 @@ func logsCSIPods(label string, c clientset.Interface) {
     }
     podList, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
     if err != nil {
-        e2elog.Logf("failed to list pods with selector %s %v", label, err)
+        framework.Logf("failed to list pods with selector %s %v", label, err)
 
         return
     }
@@ -50,11 +50,11 @@ func kubectlLogPod(c clientset.Interface, pod *v1.Pod) {
         if err != nil {
             logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
             if err != nil {
-                e2elog.Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container[i].Name, err)
+                framework.Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container[i].Name, err)
             }
         }
-        e2elog.Logf("Logs of %v/%v:%v on node %v\n", pod.Namespace, pod.Name, container[i].Name, pod.Spec.NodeName)
-        e2elog.Logf("STARTLOG\n\n%s\n\nENDLOG for container %v:%v:%v", logs, pod.Namespace, pod.Name, container[i].Name)
+        framework.Logf("Logs of %v/%v:%v on node %v\n", pod.Namespace, pod.Name, container[i].Name, pod.Spec.NodeName)
+        framework.Logf("STARTLOG\n\n%s\n\nENDLOG for container %v:%v:%v", logs, pod.Namespace, pod.Name, container[i].Name)
     }
 }
@@ -28,7 +28,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
 )
 
 func createNamespace(c kubernetes.Interface, name string) error {
@@ -46,7 +46,7 @@ func createNamespace(c kubernetes.Interface, name string) error {
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
-            e2elog.Logf("Error getting namespace: '%s': %v", name, err)
+            framework.Logf("Error getting namespace: '%s': %v", name, err)
             if apierrs.IsNotFound(err) {
                 return false, nil
             }
@@ -74,7 +74,7 @@ func deleteNamespace(c kubernetes.Interface, name string) error {
         if apierrs.IsNotFound(err) {
             return true, nil
         }
-        e2elog.Logf("Error getting namespace: '%s': %v", name, err)
+        framework.Logf("Error getting namespace: '%s': %v", name, err)
         if isRetryableAPIError(err) {
             return false, nil
         }
e2e/nfs.go (212 lines changed)
@@ -31,7 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
     "k8s.io/pod-security-admission/api"
 )
 
@@ -56,19 +56,19 @@ func deployNFSPlugin(f *framework.Framework) {
 
     err := deleteResource(nfsDirPath + nfsProvisionerRBAC)
     if err != nil {
-        e2elog.Failf("failed to delete provisioner rbac %s: %v", nfsDirPath+nfsProvisionerRBAC, err)
+        framework.Failf("failed to delete provisioner rbac %s: %v", nfsDirPath+nfsProvisionerRBAC, err)
     }
 
     err = deleteResource(nfsDirPath + nfsNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to delete nodeplugin rbac %s: %v", nfsDirPath+nfsNodePluginRBAC, err)
+        framework.Failf("failed to delete nodeplugin rbac %s: %v", nfsDirPath+nfsNodePluginRBAC, err)
     }
 
     // the pool should not be deleted, as it may contain configurations
     // from non-e2e related CephNFS objects
     err = createPool(f, nfsPoolName)
     if err != nil {
-        e2elog.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
+        framework.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
     }
 
     createORDeleteNFSResources(f, kubectlCreate)
@@ -125,7 +125,7 @@ func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
     for _, r := range resources {
         err := r.Do(action)
         if err != nil {
-            e2elog.Failf("failed to %s resource: %v", action, err)
+            framework.Failf("failed to %s resource: %v", action, err)
         }
     }
 }
@@ -186,7 +186,7 @@ func createNFSStorageClass(
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
         if err != nil {
-            e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err)
+            framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
             if apierrs.IsAlreadyExists(err) {
                 return true, nil
             }
@@ -205,7 +205,7 @@ func createNFSStorageClass(
 func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
     pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
+        framework.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
 
         return fmt.Errorf("failed to get pod: %w", err)
     }
@@ -213,7 +213,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
         PersistentVolumeClaims(f.UniqueName).
         Get(context.TODO(), pvcName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
+        framework.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
 
         return fmt.Errorf("failed to get pvc: %w", err)
     }
@@ -229,7 +229,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
         "csi-nfsplugin", // name of the container
         cephCSINamespace)
     if stdErr != "" {
-        e2elog.Logf("StdErr occurred: %s", stdErr)
+        framework.Logf("StdErr occurred: %s", stdErr)
     }
 
     return err
@@ -249,7 +249,7 @@ var _ = Describe("nfs", func() {
         if cephCSINamespace != defaultNs {
             err := createNamespace(c, cephCSINamespace)
             if err != nil {
-                e2elog.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
+                framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
             }
         }
         deployNFSPlugin(f)
@@ -259,25 +259,25 @@ var _ = Describe("nfs", func() {
         subvolumegroup = defaultSubvolumegroup
         err := createConfigMap(nfsDirPath, f.ClientSet, f)
         if err != nil {
-            e2elog.Failf("failed to create configmap: %v", err)
+            framework.Failf("failed to create configmap: %v", err)
         }
         // create nfs provisioner secret
         key, err := createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps())
         if err != nil {
-            e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
+            framework.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
         }
         err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key)
         if err != nil {
-            e2elog.Failf("failed to create provisioner secret: %v", err)
+            framework.Failf("failed to create provisioner secret: %v", err)
         }
         // create nfs plugin secret
         key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps())
         if err != nil {
-            e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
+            framework.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
         }
         err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key)
         if err != nil {
-            e2elog.Failf("failed to create node secret: %v", err)
+            framework.Failf("failed to create node secret: %v", err)
         }
     })
 
@@ -294,34 +294,34 @@ var _ = Describe("nfs", func() {
             logsCSIPods("app=csi-nfsplugin", c)
 
             // log all details from the namespace where Ceph-CSI is deployed
-            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
+            e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
         }
         err := deleteConfigMap(nfsDirPath)
         if err != nil {
-            e2elog.Failf("failed to delete configmap: %v", err)
+            framework.Failf("failed to delete configmap: %v", err)
         }
         err = c.CoreV1().
             Secrets(cephCSINamespace).
             Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{})
         if err != nil {
-            e2elog.Failf("failed to delete provisioner secret: %v", err)
+            framework.Failf("failed to delete provisioner secret: %v", err)
         }
         err = c.CoreV1().
             Secrets(cephCSINamespace).
             Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{})
         if err != nil {
-            e2elog.Failf("failed to delete node secret: %v", err)
+            framework.Failf("failed to delete node secret: %v", err)
         }
         err = deleteResource(nfsExamplePath + "storageclass.yaml")
         if err != nil {
-            e2elog.Failf("failed to delete storageclass: %v", err)
+            framework.Failf("failed to delete storageclass: %v", err)
         }
         if deployNFS {
             deleteNFSPlugin()
             if cephCSINamespace != defaultNs {
                 err := deleteNamespace(c, cephCSINamespace)
                 if err != nil {
-                    e2elog.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
+                    framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
                 }
             }
         }
@ -345,38 +345,38 @@ var _ = Describe("nfs", func() {
|
||||
|
||||
metadataPool, getErr := getCephFSMetadataPoolName(f, fileSystemName)
|
||||
if getErr != nil {
|
||||
e2elog.Failf("failed getting cephFS metadata pool name: %v", getErr)
|
||||
framework.Failf("failed getting cephFS metadata pool name: %v", getErr)
|
||||
}
|
||||
|
||||
By("checking provisioner deployment is running", func() {
|
||||
err := waitForDeploymentComplete(f.ClientSet, nfsDeploymentName, cephCSINamespace, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("timeout waiting for deployment %s: %v", nfsDeploymentName, err)
|
||||
framework.Failf("timeout waiting for deployment %s: %v", nfsDeploymentName, err)
|
||||
}
|
||||
})
|
||||
|
||||
By("checking nodeplugin deamonset pods are running", func() {
|
||||
err := waitForDaemonSets(nfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("timeout waiting for daemonset %s: %v", nfsDeamonSetName, err)
|
||||
framework.Failf("timeout waiting for daemonset %s: %v", nfsDeamonSetName, err)
|
||||
}
|
||||
})
|
||||
|
||||
By("verify RWOP volume support", func() {
|
||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create NFS storageclass: %v", err)
|
||||
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||
}
|
||||
pvc, err := loadPVC(pvcRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
// create application
|
||||
app, err := loadApp(appRWOPPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
baseAppName := app.Name
|
||||
@ -384,59 +384,59 @@ var _ = Describe("nfs", func() {
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
if rwopMayFail(err) {
|
||||
e2elog.Logf("RWOP is not supported: %v", err)
|
||||
framework.Logf("RWOP is not supported: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
framework.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create application: %v", err)
|
||||
framework.Failf("failed to create application: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 1, fileSystemName, defaultSubvolumegroup)
|
||||
|
||||
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
framework.Failf("failed to validate RWOP pod creation: %v", err)
|
||||
}
|
||||
validateSubvolumeCount(f, 0, fileSystemName, defaultSubvolumegroup)
|
||||
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete NFS storageclass: %v", err)
|
||||
framework.Failf("failed to delete NFS storageclass: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
By("create a storageclass with pool and a PVC then bind it to an app", func() {
|
||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create NFS storageclass: %v", err)
|
||||
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||
}
|
||||
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate NFS pvc and application binding: %v", err)
|
||||
framework.Failf("failed to validate NFS pvc and application binding: %v", err)
|
||||
}
|
||||
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete NFS storageclass: %v", err)
|
||||
framework.Failf("failed to delete NFS storageclass: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
By("create a PVC and bind it to an app", func() {
|
||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create NFS storageclass: %v", err)
|
||||
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||
}
|
||||
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate NFS pvc and application binding: %v", err)
|
||||
framework.Failf("failed to validate NFS pvc and application binding: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
By("create a PVC and bind it to an app with normal user", func() {
|
||||
err := validateNormalUserPVCAccess(pvcPath, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate normal user NFS pvc and application binding: %v", err)
|
||||
framework.Failf("failed to validate normal user NFS pvc and application binding: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -444,13 +444,13 @@ var _ = Describe("nfs", func() {
|
||||
totalCount := 2
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(appPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
// create PVC and app
|
||||
@ -458,11 +458,11 @@ var _ = Describe("nfs", func() {
|
||||
name := fmt.Sprintf("%s%d", f.UniqueName, i)
|
||||
err = createPVCAndApp(name, f, pvc, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create PVC or application: %v", err)
|
||||
framework.Failf("failed to create PVC or application: %v", err)
|
||||
}
|
||||
err = validateSubvolumePath(f, pvc.Name, pvc.Namespace, fileSystemName, defaultSubvolumegroup)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to validate subvolumePath: %v", err)
|
||||
framework.Failf("failed to validate subvolumePath: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -472,7 +472,7 @@ var _ = Describe("nfs", func() {
|
||||
name := fmt.Sprintf("%s%d", f.UniqueName, i)
|
||||
err = deletePVCAndApp(name, f, pvc, app)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete PVC or application: %v", err)
|
||||
framework.Failf("failed to delete PVC or application: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
@ -482,24 +482,24 @@ var _ = Describe("nfs", func() {
|
||||
By("check data persist after recreating pod", func() {
|
||||
err := checkDataPersist(pvcPath, appPath, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to check data persist in pvc: %v", err)
|
||||
framework.Failf("failed to check data persist in pvc: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
By("Create PVC, bind it to an app, unmount volume and check app deletion", func() {
|
||||
pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create PVC or application: %v", err)
|
||||
framework.Failf("failed to create PVC or application: %v", err)
|
||||
}
|
||||
|
||||
err = unmountNFSVolume(f, app.Name, pvc.Name)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to unmount volume: %v", err)
|
||||
framework.Failf("failed to unmount volume: %v", err)
|
||||
}
|
||||
|
||||
err = deletePVCAndApp("", f, pvc, app)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete PVC or application: %v", err)
|
||||
framework.Failf("failed to delete PVC or application: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -507,13 +507,13 @@ var _ = Describe("nfs", func() {
|
||||
// create PVC and bind it to an app
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
|
||||
app, err := loadApp(appPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
|
||||
app.Namespace = f.UniqueName
|
||||
@ -525,7 +525,7 @@ var _ = Describe("nfs", func() {
|
||||
app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true
|
||||
err = createPVCAndApp("", f, pvc, app, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create PVC or application: %v", err)
|
||||
framework.Failf("failed to create PVC or application: %v", err)
|
||||
}
|
||||
|
||||
opt := metav1.ListOptions{
|
||||
@ -540,31 +540,31 @@ var _ = Describe("nfs", func() {
|
||||
&opt)
|
||||
readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
|
||||
if !strings.Contains(stdErr, readOnlyErr) {
|
||||
e2elog.Failf(stdErr)
|
||||
framework.Failf(stdErr)
|
||||
}
|
||||
|
||||
// delete PVC and app
|
||||
err = deletePVCAndApp("", f, pvc, app)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete PVC or application: %v", err)
|
||||
framework.Failf("failed to delete PVC or application: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// delete nfs provisioner secret
|
||||
err := deleteCephUser(f, keyringCephFSProvisionerUsername)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
|
||||
framework.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
|
||||
}
|
||||
// delete nfs plugin secret
|
||||
err = deleteCephUser(f, keyringCephFSNodePluginUsername)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
|
||||
framework.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
|
||||
}
|
||||
|
||||
By("Resize PVC and check application directory size", func() {
|
||||
err := resizePVCAndValidateSize(pvcPath, appPath, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to resize PVC: %v", err)
|
||||
framework.Failf("failed to resize PVC: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -579,28 +579,28 @@ var _ = Describe("nfs", func() {
|
||||
wg.Add(totalCount)
|
||||
err := createNFSSnapshotClass(f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete NFS snapshotclass: %v", err)
|
||||
framework.Failf("failed to delete NFS snapshotclass: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
err = deleteNFSSnapshotClass()
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
|
||||
framework.Failf("failed to delete VolumeSnapshotClass: %v", err)
|
||||
}
|
||||
}()
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
|
||||
pvc.Namespace = f.UniqueName
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
framework.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
|
||||
app, err := loadApp(appPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
|
||||
app.Namespace = f.UniqueName
|
||||
@ -613,12 +613,12 @@ var _ = Describe("nfs", func() {
|
||||
}
|
||||
checkSum, err := writeDataAndCalChecksum(app, &opt, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to calculate checksum: %v", err)
|
||||
framework.Failf("failed to calculate checksum: %v", err)
|
||||
}
|
||||
|
||||
_, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err)
|
||||
framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
|
||||
}
|
||||
|
||||
snap := getSnapshot(snapshotPath)
|
||||
@ -638,22 +638,22 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("creating snapshots failed, %d errors were logged", failed)
|
||||
framework.Failf("creating snapshots failed, %d errors were logged", failed)
|
||||
}
|
||||
validateCephFSSnapshotCount(f, totalCount, defaultSubvolumegroup, pv)
|
||||
|
||||
pvcClone, err := loadPVC(pvcClonePath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
appClone, err := loadApp(appClonePath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
pvcClone.Namespace = f.UniqueName
|
||||
appClone.Namespace = f.UniqueName
|
||||
@ -673,14 +673,14 @@ var _ = Describe("nfs", func() {
|
||||
}
|
||||
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
|
||||
var checkSumClone string
|
||||
e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
framework.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
|
||||
e2elog.Logf("checksum for clone is %s", checkSumClone)
|
||||
framework.Logf("checksum for clone is %s", checkSumClone)
|
||||
if chErrs[n] != nil {
|
||||
e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
framework.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
}
|
||||
if checkSumClone != checkSum {
|
||||
e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
@ -691,23 +691,23 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("creating PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("creating PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
for i, err := range chErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
framework.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
|
||||
@ -729,12 +729,12 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
parentPVCCount := totalSubvolumes - totalCount
|
||||
@ -755,14 +755,14 @@ var _ = Describe("nfs", func() {
|
||||
}
|
||||
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
|
||||
var checkSumClone string
|
||||
e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
framework.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
|
||||
e2elog.Logf("checksum for clone is %s", checkSumClone)
|
||||
framework.Logf("checksum for clone is %s", checkSumClone)
|
||||
if chErrs[n] != nil {
|
||||
e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
framework.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
}
|
||||
if checkSumClone != checkSum {
|
||||
e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
@ -773,23 +773,23 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("creating PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("creating PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
for i, err := range chErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
framework.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
|
||||
@ -810,12 +810,12 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("deleting snapshots failed, %d errors were logged", failed)
|
||||
framework.Failf("deleting snapshots failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv)
|
||||
@ -835,12 +835,12 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup)
|
||||
@ -849,7 +849,7 @@ var _ = Describe("nfs", func() {
|
||||
// delete parent pvc
|
||||
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete PVC or application: %v", err)
|
||||
framework.Failf("failed to delete PVC or application: %v", err)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
|
||||
@ -867,17 +867,17 @@ var _ = Describe("nfs", func() {
|
||||
totalSubvolumes := totalCount + 1
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
|
||||
pvc.Namespace = f.UniqueName
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create PVC: %v", err)
|
||||
framework.Failf("failed to create PVC: %v", err)
|
||||
}
|
||||
app, err := loadApp(appPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
|
||||
@ -889,18 +889,18 @@ var _ = Describe("nfs", func() {
|
||||
}
|
||||
checkSum, err := writeDataAndCalChecksum(app, &opt, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to calculate checksum: %v", err)
|
||||
framework.Failf("failed to calculate checksum: %v", err)
|
||||
}
|
||||
|
||||
pvcClone, err := loadPVC(pvcSmartClonePath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load PVC: %v", err)
|
||||
framework.Failf("failed to load PVC: %v", err)
|
||||
}
|
||||
pvcClone.Spec.DataSource.Name = pvc.Name
|
||||
pvcClone.Namespace = f.UniqueName
|
||||
appClone, err := loadApp(appSmartClonePath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load application: %v", err)
|
||||
framework.Failf("failed to load application: %v", err)
|
||||
}
|
||||
appClone.Namespace = f.UniqueName
|
||||
appClone.Labels = label
|
||||
@ -913,14 +913,14 @@ var _ = Describe("nfs", func() {
|
||||
if wgErrs[n] == nil {
|
||||
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
|
||||
var checkSumClone string
|
||||
e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
framework.Logf("Calculating checksum clone for filepath %s", filePath)
|
||||
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
|
||||
e2elog.Logf("checksum for clone is %s", checkSumClone)
|
||||
framework.Logf("checksum for clone is %s", checkSumClone)
|
||||
if chErrs[n] != nil {
|
||||
e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
framework.Logf("Failed calculating checksum clone %s", chErrs[n])
|
||||
}
|
||||
if checkSumClone != checkSum {
|
||||
e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
@ -932,23 +932,23 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to create PVC or application (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to create PVC or application (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
for i, err := range chErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
framework.Failf("calculating checksum failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
|
||||
@ -957,7 +957,7 @@ var _ = Describe("nfs", func() {
|
||||
// delete parent pvc
|
||||
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete PVC or application: %v", err)
|
||||
framework.Failf("failed to delete PVC or application: %v", err)
|
||||
}
|
||||
|
||||
wg.Add(totalCount)
|
||||
@ -975,12 +975,12 @@ var _ = Describe("nfs", func() {
|
||||
for i, err := range wgErrs {
|
||||
if err != nil {
|
||||
// not using Failf() as it aborts the test and does not log other errors
|
||||
e2elog.Logf("failed to delete PVC or application (%s%d): %v", f.UniqueName, i, err)
|
||||
framework.Logf("failed to delete PVC or application (%s%d): %v", f.UniqueName, i, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
if failed != 0 {
|
||||
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
|
||||
}
|
||||
|
||||
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
|
||||
|
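Note (not part of the diff): besides the log rename, nfs.go switches the post-failure diagnostics from framework.DumpAllNamespaceInfo to the relocated e2edebug.DumpAllNamespaceInfo with the same two-argument shape. A sketch matching the AfterEach usage above (helper name illustrative):

import (
    clientset "k8s.io/client-go/kubernetes"
    e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)

// dumpOnFailure logs all details from the given namespace after a failed spec.
func dumpOnFailure(c clientset.Interface, namespace string) {
    e2edebug.DumpAllNamespaceInfo(c, namespace)
}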
@@ -25,6 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 
 func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error {
@@ -35,7 +36,7 @@ func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error
         return fmt.Errorf("failed to list node: %w", err)
     }
     for i := range nodes.Items {
-        framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
+        e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
     }
 
     return nil
@@ -47,7 +48,7 @@ func deleteNodeLabel(c kubernetes.Interface, labelKey string) error {
         return fmt.Errorf("failed to list node: %w", err)
     }
     for i := range nodes.Items {
-        framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
+        e2enode.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
     }
 
     return nil
@@ -59,7 +60,7 @@ func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) erro
         return fmt.Errorf("failed to list node: %w", err)
     }
     for i := range nodes.Items {
-        framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
+        e2enode.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
     }
 
     return nil
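Note (not part of the diff): the node-label helpers keep their signatures; only the package changes from framework to e2enode. A sketch of the relocated calls (node and label names illustrative):

import (
    "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// labelRoundTrip adds a label, asserts it is present, then removes it.
func labelRoundTrip(c kubernetes.Interface, nodeName string) {
    e2enode.AddOrUpdateLabelOnNode(c, nodeName, "example-key", "example-value")
    e2enode.ExpectNodeHasLabel(c, nodeName, "example-key", "example-value")
    e2enode.RemoveLabelOffNode(c, nodeName, "example-key")
}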
e2e/pod.go (58 lines changed)
@ -30,7 +30,7 @@ import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
@ -40,17 +40,17 @@ const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same
func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string) (string, error) {
	ds, err := f.ClientSet.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonSetName, metav1.GetOptions{})
	if err != nil {
		e2elog.Logf("Error getting daemonsets with name %s in namespace %s", daemonSetName, ns)
		framework.Logf("Error getting daemonsets with name %s in namespace %s", daemonSetName, ns)

		return "", err
	}
	s, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		e2elog.Logf("Error parsing %s daemonset selector in namespace %s", daemonSetName, ns)
		framework.Logf("Error parsing %s daemonset selector in namespace %s", daemonSetName, ns)

		return "", err
	}
	e2elog.Logf("LabelSelector for %s daemonsets in namespace %s: %s", daemonSetName, ns, s.String())
	framework.Logf("LabelSelector for %s daemonsets in namespace %s: %s", daemonSetName, ns, s.String())

	return s.String(), nil
}
@ -58,12 +58,12 @@ func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string)
func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)
	framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
			framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
			if strings.Contains(err.Error(), "not found") {
				return false, nil
			}
@ -75,7 +75,7 @@ func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
		}
		dNum := ds.Status.DesiredNumberScheduled
		ready := ds.Status.NumberReady
		e2elog.Logf(
		framework.Logf(
			"%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)",
			ready,
			dNum,
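Most hunks in this file are the same mechanical substitution: kubernetes 1.26 removed the k8s.io/kubernetes/test/e2e/framework/log package, so every e2elog.Logf (and e2elog.Failf) becomes framework.Logf or framework.Failf. A minimal sketch of the after state (the helper name is illustrative):

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// logDaemonSetProgress logs with framework.Logf, which replaces the
// removed e2elog.Logf; the Printf-style format string is unchanged.
func logDaemonSetProgress(ready, desired int32, ns, name string, elapsed int) {
	framework.Logf(
		"%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)",
		ready, desired, ns, name, elapsed)
}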
@ -98,7 +98,7 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
		listErr error
	)
	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
		podList, listErr = f.PodClientNS(ns).List(context.TODO(), *opt)
		podList, listErr = e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
		if listErr != nil {
			if isRetryableAPIError(listErr) {
				return false, nil
@ -137,14 +137,14 @@ func getCommandInPodOpts(
	f *framework.Framework,
	c, ns, cn string,
	opt *metav1.ListOptions,
) (framework.ExecOptions, error) {
) (e2epod.ExecOptions, error) {
	cmd := []string{"/bin/sh", "-c", c}
	pName, cName, err := findPodAndContainerName(f, ns, cn, opt)
	if err != nil {
		return framework.ExecOptions{}, err
		return e2epod.ExecOptions{}, err
	}

	return framework.ExecOptions{
	return e2epod.ExecOptions{
		Command: cmd,
		PodName: pName,
		Namespace: ns,
@ -188,7 +188,7 @@ func execCommandInDaemonsetPod(
	}

	cmd := []string{"/bin/sh", "-c", c}
	podOpt := framework.ExecOptions{
	podOpt := e2epod.ExecOptions{
		Command: cmd,
		Namespace: ns,
		PodName: podName,
@ -204,7 +204,7 @@ func execCommandInDaemonsetPod(

// listPods returns slice of pods matching given ListOptions and namespace.
func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.Pod, error) {
	podList, err := f.PodClientNS(ns).List(context.TODO(), *opt)
	podList, err := e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
	if len(podList.Items) == 0 {
		return podList.Items, fmt.Errorf("podlist for label '%s' in namespace %s is empty", opt.LabelSelector, ns)
	}
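A second recurring pattern above: the pod client is no longer a method on the framework object. A condensed sketch of the 1.26 form (listPodsByLabel is an illustrative name):

package e2e

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// listPodsByLabel lists pods in ns matching a label selector; the pod
// client is now obtained via e2epod.PodClientNS(f, ns) instead of the
// removed f.PodClientNS(ns) method.
func listPodsByLabel(f *framework.Framework, ns, selector string) ([]v1.Pod, error) {
	podList, err := e2epod.PodClientNS(f, ns).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return nil, err
	}

	return podList.Items, nil
}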
@ -212,18 +212,18 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.
	return podList.Items, err
}

func execWithRetry(f *framework.Framework, opts *framework.ExecOptions) (string, string, error) {
func execWithRetry(f *framework.Framework, opts *e2epod.ExecOptions) (string, string, error) {
	timeout := time.Duration(deployTimeout) * time.Minute
	var stdOut, stdErr string
	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
		var execErr error
		stdOut, stdErr, execErr = f.ExecWithOptions(*opts)
		stdOut, stdErr, execErr = e2epod.ExecWithOptions(f, *opts)
		if execErr != nil {
			if isRetryableAPIError(execErr) {
				return false, nil
			}

			e2elog.Logf("failed to execute command: %v", execErr)
			framework.Logf("failed to execute command: %v", execErr)

			return false, fmt.Errorf("failed to execute command: %w", execErr)
		}
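ExecOptions and ExecWithOptions moved the same way, from methods and types on the framework package to the e2epod subpackage, with ExecWithOptions now taking the framework as its first argument. An illustrative wrapper (name and parameters are not from the commit):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execShellInPod runs a shell command in the given pod/container;
// e2epod.ExecWithOptions(f, opts) replaces the old f.ExecWithOptions(opts).
func execShellInPod(f *framework.Framework, ns, podName, containerName, cmd string) (string, string, error) {
	opts := e2epod.ExecOptions{
		Command:       []string{"/bin/sh", "-c", cmd},
		PodName:       podName,
		Namespace:     ns,
		ContainerName: containerName,
		CaptureStdout: true,
		CaptureStderr: true,
	}

	return e2epod.ExecWithOptions(f, opts)
}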
@ -242,7 +242,7 @@ func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOpti

	stdOut, stdErr, err := execWithRetry(f, &podOpt)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
		framework.Logf("stdErr occurred: %v", stdErr)
	}

	return stdOut, stdErr, err
@ -258,7 +258,7 @@ func execCommandInContainer(

	stdOut, stdErr, err := execWithRetry(f, &podOpt)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
		framework.Logf("stdErr occurred: %v", stdErr)
	}

	return stdOut, stdErr, err
@ -268,7 +268,7 @@ func execCommandInContainerByPodName(
	f *framework.Framework, shellCmd, namespace, podName, containerName string,
) (string, string, error) {
	cmd := []string{"/bin/sh", "-c", shellCmd}
	execOpts := framework.ExecOptions{
	execOpts := e2epod.ExecOptions{
		Command: cmd,
		PodName: podName,
		Namespace: namespace,
@ -281,7 +281,7 @@ func execCommandInContainerByPodName(

	stdOut, stdErr, err := execWithRetry(f, &execOpts)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
		framework.Logf("stdErr occurred: %v", stdErr)
	}

	return stdOut, stdErr, err
@ -298,7 +298,7 @@ func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, stri

	stdOut, stdErr, err := execWithRetry(f, &podOpt)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
		framework.Logf("stdErr occurred: %v", stdErr)
	}

	return stdOut, stdErr, err
@ -312,7 +312,7 @@ func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *met

	stdOut, stdErr, err := execWithRetry(f, &podOpt)
	if err != nil {
		e2elog.Logf("command %s failed: %v", c, err)
		framework.Logf("command %s failed: %v", c, err)
	}

	return stdOut, stdErr
@ -351,7 +351,7 @@ func createAppErr(c kubernetes.Interface, app *v1.Pod, timeout int, errString st
func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, expectedError string) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Running state", name)
	framework.Logf("Waiting up to %v to be in Running state", name)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
@ -376,13 +376,13 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
				return false, err
			}
			if strings.Contains(events.String(), expectedError) {
				e2elog.Logf("Expected Error %q found successfully", expectedError)
				framework.Logf("Expected Error %q found successfully", expectedError)

				return true, err
			}
		}
	case v1.PodUnknown:
		e2elog.Logf(
		framework.Logf(
			"%s app is in %s phase expected to be in Running state (%d seconds elapsed)",
			name,
			pod.Status.Phase,
@ -400,7 +400,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
		return fmt.Errorf("failed to delete app: %w", err)
	}
	start := time.Now()
	e2elog.Logf("Waiting for pod %v to be deleted", name)
	framework.Logf("Waiting for pod %v to be deleted", name)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
@ -411,7 +411,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
		if apierrs.IsNotFound(err) {
			return true, nil
		}
		e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		framework.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))

		return false, fmt.Errorf("failed to get app: %w", err)
	}
@ -431,7 +431,7 @@ func deletePodWithLabel(label, ns string, skipNotFound bool) error {
		label,
		fmt.Sprintf("--ignore-not-found=%t", skipNotFound))
	if err != nil {
		e2elog.Logf("failed to delete pod %v", err)
		framework.Logf("failed to delete pod %v", err)
	}

	return err
@ -449,7 +449,7 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op
	}
	// extract checksum from sha512sum output.
	checkSum := strings.Split(sha512sumOut, " ")[0]
	e2elog.Logf("Calculated checksum %s", checkSum)
	framework.Logf("Calculated checksum %s", checkSum)

	return checkSum, nil
}
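In the calculateSHA512sum hunk, the Split separator is a single space, since sha512sum prints "<checksum>  <filename>" and the first field is the checksum (restoring the space here is an assumption based on that output format). A slightly more defensive sketch of the same extraction, with illustrative names:

package e2e

import (
	"fmt"
	"strings"
)

// checksumFromOutput extracts the checksum field from sha512sum output;
// strings.Fields tolerates any amount of whitespace between the columns.
func checksumFromOutput(sha512sumOut string) (string, error) {
	fields := strings.Fields(sha512sumOut)
	if len(fields) == 0 {
		return "", fmt.Errorf("unexpected sha512sum output: %q", sha512sumOut)
	}

	return fields[0], nil
}

For example, checksumFromOutput("abc123  /mnt/testfile") returns "abc123".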
37
e2e/pvc.go
@ -29,7 +29,6 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

@ -57,13 +56,13 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
	name := pvc.Name
	namespace := pvc.Namespace
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Bound state", pvc)
	framework.Logf("Waiting up to %v to be in Bound state", pvc)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		framework.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
			framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
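The wait loop in createPVCAndvalidatePV follows a pattern used throughout these helpers: poll, log, swallow retryable API errors, and stop once the desired phase is reached. A condensed sketch (waitForPVCBound is an illustrative name; the real helper also distinguishes retryable errors via isRetryableAPIError):

package e2e

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPVCBound polls until the claim reports Bound or the timeout expires.
func waitForPVCBound(c kubernetes.Interface, ns, name string, poll, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Error getting pvc %q in namespace %q: %v", name, ns, err)
			// transient errors simply continue the poll in this sketch.
			return false, nil
		}

		return pvc.Status.Phase == v1.ClaimBound, nil
	})
}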
@ -132,7 +131,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
	pvcToDelete := pvc
	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PVC is deleted.
		e2elog.Logf(
		framework.Logf(
			"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
			pvcToDelete.Name,
			pvcToDelete.Status.String(),
@ -144,7 +143,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
		if pvcToDelete.Status.Phase == "" {
			// this is unexpected, an empty Phase is not defined
			// FIXME: see https://github.com/ceph/ceph-csi/issues/1874
			e2elog.Logf("PVC %s is in a weird state: %s", pvcToDelete.Name, pvcToDelete.String())
			framework.Logf("PVC %s is in a weird state: %s", pvcToDelete.Name, pvcToDelete.String())
		}

		return false, nil
@ -170,7 +169,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PV is deleted.
		e2elog.Logf(
		framework.Logf(
			"waiting for PV %s in state %s to be deleted (%d seconds elapsed)",
			pvToDelete.Name,
			pvToDelete.Status.String(),
@ -200,7 +199,7 @@ func getPersistentVolumeClaim(c kubernetes.Interface, namespace, name string) (*
	err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
		pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
			framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
@ -223,7 +222,7 @@ func getPersistentVolume(c kubernetes.Interface, name string) (*v1.PersistentVol
	err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
		pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pv %q: %v", name, err)
			framework.Logf("Error getting pv %q: %v", name, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
@ -258,7 +257,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
	nameSpace := pvc.Namespace
	name := pvc.Name
	var err error
	e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
	framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)

	pvc, err = getPersistentVolumeClaim(c, nameSpace, name)
	if err != nil {
@ -277,19 +276,19 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PVC is really deleted.
		e2elog.Logf(
		framework.Logf(
			"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
			name,
			pvc.Status.String(),
			int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			e2elog.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
			framework.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)

			return false, nil
		}
		if isRetryableAPIError(err) {
			e2elog.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)
			framework.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)

			return false, nil
		}
@ -300,12 +299,12 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
		// Examine the pv.ClaimRef and UID. Expect nil values.
		oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
		if err == nil {
			e2elog.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
			framework.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)

			return false, nil
		}
		if isRetryableAPIError(err) {
			e2elog.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)
			framework.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)

			return false, nil
		}
@ -387,12 +386,12 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
		if err != nil {
			e2elog.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
			framework.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)

			return false, nil
		}
		if stdOut == "" {
			e2elog.Logf("no metrics received from kubelet on IP %s", kubelet)
			framework.Logf("no metrics received from kubelet on IP %s", kubelet)

			return false, nil
		}
@ -406,13 +405,13 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
		}
		if strings.Contains(line, namespace) && strings.Contains(line, name) {
			// TODO: validate metrics if possible
			e2elog.Logf("found metrics for pvc %s/%s: %s", pvc.Namespace, pvc.Name, line)
			framework.Logf("found metrics for pvc %s/%s: %s", pvc.Namespace, pvc.Name, line)

			return true, nil
		}
	}

	e2elog.Logf("no metrics found for pvc %s/%s", pvc.Namespace, pvc.Name)
	framework.Logf("no metrics found for pvc %s/%s", pvc.Namespace, pvc.Name)

	return false, nil
})
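getMetricsForPVC ultimately reduces to a line scan over the kubelet metrics dump; a self-contained sketch of that matching step (the function name is illustrative):

package e2e

import "strings"

// findPVCMetricLine returns the first metrics line mentioning both the
// PVC namespace and name, mirroring the loop in getMetricsForPVC.
func findPVCMetricLine(metrics, namespace, name string) (string, bool) {
	for _, line := range strings.Split(metrics, "\n") {
		if strings.Contains(line, namespace) && strings.Contains(line, name) {
			return line, true
		}
	}

	return "", false
}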
1211
e2e/rbd.go
File diff suppressed because it is too large
@ -35,7 +35,6 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// nolint:gomnd // numbers specify Kernel versions.
@ -168,7 +167,7 @@ func createRBDStorageClass(
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
		if err != nil {
			e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err)
			framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
@ -326,13 +325,13 @@ func validateImageOwner(pvcPath string, f *framework.Framework) error {
	}

	if radosNamespace != "" {
		e2elog.Logf(
		framework.Logf(
			"found image journal %s in pool %s namespace %s",
			"csi.volume."+imageData.imageID,
			defaultRBDPool,
			radosNamespace)
	} else {
		e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, defaultRBDPool)
		framework.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, defaultRBDPool)
	}

	if !strings.Contains(stdOut, pvc.Namespace) {
@ -347,7 +346,7 @@ func logErrors(f *framework.Framework, msg string, wgErrs []error) int {
	for i, err := range wgErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("%s (%s%d): %v", msg, f.UniqueName, i, err)
			framework.Logf("%s (%s%d): %v", msg, f.UniqueName, i, err)
			failures++
		}
	}
@ -526,7 +525,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f
	if !destroyed {
		return fmt.Errorf("passphrased was not destroyed: %s", msg)
	} else if msg != "" {
		e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
		framework.Logf("passphrase destroyed, but message returned: %s", msg)
	}
}

@ -575,7 +574,7 @@ func validateEncryptedFilesystemAndAppBinding(pvcPath, appPath string, kms kmsCo
	if !destroyed {
		return fmt.Errorf("passphrased was not destroyed: %s", msg)
	} else if msg != "" {
		e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
		framework.Logf("passphrase destroyed, but message returned: %s", msg)
	}
}

@ -723,23 +722,26 @@ func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim
	return err
}

//nolint:unused // required for reclaimspace e2e.
// rbdDuImage contains the disk-usage statistics of an RBD image.
//
//nolint:unused // required for reclaimspace e2e.
type rbdDuImage struct {
	Name            string `json:"name"`
	ProvisionedSize uint64 `json:"provisioned_size"`
	UsedSize        uint64 `json:"used_size"`
}

//nolint:unused // required for reclaimspace e2e.
// rbdDuImageList contains the list of images returned by 'rbd du'.
//
//nolint:unused // required for reclaimspace e2e.
type rbdDuImageList struct {
	Images []*rbdDuImage `json:"images"`
}

//nolint:deadcode,unused // required for reclaimspace e2e.
// getRbdDu runs 'rbd du' on the RBD image and returns a rbdDuImage struct with
// the result.
//
//nolint:deadcode,unused // required for reclaimspace e2e.
func getRbdDu(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*rbdDuImage, error) {
	rdil := rbdDuImageList{}

@ -768,11 +770,12 @@ func getRbdDu(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*rbdDuImag
	return nil, fmt.Errorf("image %s not found", imageData.imageName)
}

//nolint:deadcode,unused // required for reclaimspace e2e.
// sparsifyBackingRBDImage runs `rbd sparsify` on the RBD image. Once done, all
// data blocks that contain zeros are discarded/trimmed/unmapped and do not
// take up any space anymore. This can be used to verify that an empty, but
// allocated (with zerofill) extents have been released.
//
//nolint:deadcode,unused // required for reclaimspace e2e.
func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
@ -849,9 +852,9 @@ func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim
	}

	if radosNamespace != "" {
		e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
		framework.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
	} else {
		e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
		framework.Logf("found image %s in pool %s", imageData.imageName, pool)
	}

	return stdOut, nil
@ -896,13 +899,13 @@ func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolume
	}

	if radosNamespace != "" {
		e2elog.Logf(
		framework.Logf(
			"found image journal %s in pool %s namespace %s",
			"csi.volume."+imageData.imageID,
			pool,
			radosNamespace)
	} else {
		e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
		framework.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
	}

	return nil
@ -931,13 +934,13 @@ func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeCl
	}

	if radosNamespace != "" {
		e2elog.Logf(
		framework.Logf(
			"found CSI journal entry %s in pool %s namespace %s",
			"csi.volume."+imageData.pvName,
			pool,
			radosNamespace)
	} else {
		e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
		framework.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
	}

	return nil
@ -1039,7 +1042,7 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
		return true, nil
	}
	errReason = fmt.Errorf("found %d images found in trash. Image details %v", len(imagesInTrash), imagesInTrash)
	e2elog.Logf(errReason.Error())
	framework.Logf(errReason.Error())

	return false, nil
})
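Besides the logging swap, the rbdDuImage/rbdDuImageList/getRbdDu hunks move each //nolint directive from above the doc comment to its end, separated by a bare //. That placement keeps the directive attached to the declaration it suppresses; presumably this follows the Go 1.19 doc-comment reformatting applied by gofmt, under which a directive left above the comment block would be detached. The resulting shape:

package e2e

// rbdDuImage contains the disk-usage statistics of an RBD image.
//
// The directive below stays as the final line of the doc comment, after
// a bare "//", so linters still associate it with the type.
//
//nolint:unused // required for reclaimspace e2e.
type rbdDuImage struct {
	Name            string `json:"name"`
	ProvisionedSize uint64 `json:"provisioned_size"`
	UsedSize        uint64 `json:"used_size"`
}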
@ -30,7 +30,6 @@ import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/cloud-provider/volume/helpers"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string, t int) error {
@ -49,15 +48,15 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
	Expect(err).Should(BeNil())

	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Resized state", pvc)
	framework.Logf("Waiting up to %v to be in Resized state", pvc)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvcName, int(time.Since(start).Seconds()))
		framework.Logf("waiting for PVC %s (%d seconds elapsed)", pvcName, int(time.Since(start).Seconds()))
		updatedPVC, err = c.CoreV1().
			PersistentVolumeClaims(pvcNamespace).
			Get(context.TODO(), pvcName, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvcNamespace, err)
			framework.Logf("Error getting pvc in namespace: '%s': %v", pvcNamespace, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
@ -66,7 +65,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
		}
		pvcConditions := updatedPVC.Status.Conditions
		if len(pvcConditions) > 0 {
			e2elog.Logf("pvc state %v", pvcConditions[0].Type)
			framework.Logf("pvc state %v", pvcConditions[0].Type)
			if pvcConditions[0].Type == v1.PersistentVolumeClaimResizing ||
				pvcConditions[0].Type == v1.PersistentVolumeClaimFileSystemResizePending {
				return false, nil
@ -74,7 +73,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
		}

		if !updatedPVC.Status.Capacity[v1.ResourceStorage].Equal(resource.MustParse(size)) {
			e2elog.Logf(
			framework.Logf(
				"current size in status %v,expected size %v",
				updatedPVC.Status.Capacity[v1.ResourceStorage],
				resource.MustParse(size))
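The resize wait above checks two signals: the claim's conditions (Resizing or FileSystemResizePending mean the expansion is still in flight) and the reported capacity. An illustrative predicate distilled from that loop:

package e2e

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// pvcResized reports whether the claim has finished expanding to size:
// no pending resize condition, and status capacity equal to the request.
func pvcResized(pvc *v1.PersistentVolumeClaim, size string) bool {
	for _, cond := range pvc.Status.Conditions {
		if cond.Type == v1.PersistentVolumeClaimResizing ||
			cond.Type == v1.PersistentVolumeClaimFileSystemResizePending {
			return false
		}
	}
	capacity := pvc.Status.Capacity[v1.ResourceStorage]

	return capacity.Equal(resource.MustParse(size))
}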
@ -187,13 +186,13 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
	start := time.Now()

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
		framework.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
		output, stdErr, err := execCommandInPod(f, cmd, ns, opt)
		if err != nil {
			return false, err
		}
		if stdErr != "" {
			e2elog.Logf("failed to execute command in app pod %v", stdErr)
			framework.Logf("failed to execute command in app pod %v", stdErr)

			return false, nil
		}
@ -208,7 +207,7 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
		return false, err
	}
	if actualSize != expectedSize {
		e2elog.Logf("expected size %s found %s information", size, output)
		framework.Logf("expected size %s found %s information", size, output)

		return false, nil
	}
@ -30,7 +30,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func getSnapshotClass(path string) snapapi.VolumeSnapshotClass {
@ -74,20 +73,20 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
	if err != nil {
		return fmt.Errorf("failed to create volumesnapshot: %w", err)
	}
	e2elog.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)
	framework.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)

	timeout := time.Duration(t) * time.Minute
	name := snap.Name
	start := time.Now()
	e2elog.Logf("waiting for %v to be in ready state", snap)
	framework.Logf("waiting for %v to be in ready state", snap)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
		framework.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
		snaps, err := sclient.
			VolumeSnapshots(snap.Namespace).
			Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
			framework.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
@ -103,7 +102,7 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
		if *snaps.Status.ReadyToUse {
			return true, nil
		}
		e2elog.Logf("snapshot %s in %v state", snap.Name, *snaps.Status.ReadyToUse)
		framework.Logf("snapshot %s in %v state", snap.Name, *snaps.Status.ReadyToUse)

		return false, nil
	})
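createSnapshot's wait loop polls the VolumeSnapshot until Status.ReadyToUse becomes true. A condensed sketch; the snapshot client import path is an assumption (external-snapshotter client v6, the generation commonly paired with kubernetes 1.26), and the nil checks guard the pointer fields before dereferencing:

package e2e

import (
	"context"
	"time"

	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForSnapshotReady polls until the snapshot reports ReadyToUse.
func waitForSnapshotReady(sclient snapclient.SnapshotV1Interface, ns, name string, poll, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		snap, err := sclient.VolumeSnapshots(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Error getting snapshot in namespace '%s': %v", ns, err)

			return false, nil
		}
		// Status and ReadyToUse stay nil until the controller fills them in.
		if snap.Status == nil || snap.Status.ReadyToUse == nil {
			return false, nil
		}

		return *snap.Status.ReadyToUse, nil
	})
}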
@ -125,10 +124,10 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
	timeout := time.Duration(t) * time.Minute
	name := snap.Name
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be deleted", snap)
	framework.Logf("Waiting up to %v to be deleted", snap)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		framework.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		_, err := sclient.
			VolumeSnapshots(snap.Namespace).
			Get(context.TODO(), name, metav1.GetOptions{})
@ -227,7 +226,7 @@ func createNFSSnapshotClass(f *framework.Framework) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = sclient.VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
		if err != nil {
			e2elog.Logf("error creating SnapshotClass %q: %v", sc.Name, err)
			framework.Logf("error creating SnapshotClass %q: %v", sc.Name, err)
			if apierrs.IsAlreadyExists(err) {
				return true, nil
			}
@ -256,7 +255,7 @@ func deleteNFSSnapshotClass() error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		err = sclient.VolumeSnapshotClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
		if err != nil {
			e2elog.Logf("error deleting SnapshotClass %q: %v", sc.Name, err)
			framework.Logf("error deleting SnapshotClass %q: %v", sc.Name, err)
			if apierrs.IsNotFound(err) {
				return true, nil
			}
@ -341,14 +340,14 @@ func validateBiggerPVCFromSnapshot(f *framework.Framework,
	}
	pvcClone, err := loadPVC(pvcClonePath)
	if err != nil {
		e2elog.Failf("failed to load PVC: %v", err)
		framework.Failf("failed to load PVC: %v", err)
	}
	pvcClone.Namespace = f.UniqueName
	pvcClone.Spec.DataSource.Name = snap.Name
	pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
	appClone, err := loadApp(appClonePath)
	if err != nil {
		e2elog.Failf("failed to load application: %v", err)
		framework.Failf("failed to load application: %v", err)
	}
	appClone.Namespace = f.UniqueName
	appClone.Labels = label
@ -384,28 +383,28 @@ func validateBiggerPVCFromSnapshot(f *framework.Framework,
	)
	imageList, err = listRBDImages(f, defaultRBDPool)
	if err != nil {
		e2elog.Failf("failed to list rbd images: %v", err)
		framework.Failf("failed to list rbd images: %v", err)
	}
	e2elog.Logf("list of rbd images: %v", imageList)
	framework.Logf("list of rbd images: %v", imageList)
	volSnapName, stdErr, err = execCommandInToolBoxPod(f,
		formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNameKey),
		rookNamespace)
	if checkGetKeyError(err, stdErr) {
		e2elog.Failf("found volume snapshot name %s/%s %s=%s: err=%v stdErr=%q",
		framework.Failf("found volume snapshot name %s/%s %s=%s: err=%v stdErr=%q",
			rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey, volSnapName, err, stdErr)
	}
	volSnapNamespace, stdErr, err = execCommandInToolBoxPod(f,
		formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNamespaceKey),
		rookNamespace)
	if checkGetKeyError(err, stdErr) {
		e2elog.Failf("found volume snapshot namespace %s/%s %s=%s: err=%v stdErr=%q",
		framework.Failf("found volume snapshot namespace %s/%s %s=%s: err=%v stdErr=%q",
			rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey, volSnapNamespace, err, stdErr)
	}
	volSnapContentName, stdErr, err = execCommandInToolBoxPod(f,
		formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapContentNameKey),
		rookNamespace)
	if checkGetKeyError(err, stdErr) {
		e2elog.Failf("found snapshotcontent name %s/%s %s=%s: err=%v stdErr=%q",
		framework.Failf("found snapshotcontent name %s/%s %s=%s: err=%v stdErr=%q",
			rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey,
			volSnapContentName, err, stdErr)
	}
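validateBiggerPVCFromSnapshot asserts that the snapshot metadata keys have been removed from the RBD image. formatImageMetaGetCmd is defined elsewhere in the suite; assuming it wraps the stock rbd CLI and ignoring rados namespaces, an illustrative equivalent would be:

package e2e

import "fmt"

// imageMetaGetCmd builds an `rbd image-meta get` invocation for one
// metadata key on pool/image; this is a sketch, not the suite's helper.
func imageMetaGetCmd(pool, image, key string) string {
	return fmt.Sprintf("rbd image-meta get %s/%s %s", pool, image, key)
}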
@ -29,7 +29,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
	"k8s.io/pod-security-admission/api"
)

@ -64,7 +64,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
		if cephCSINamespace != defaultNs {
			err = createNamespace(c, cephCSINamespace)
			if err != nil {
				e2elog.Failf("failed to create namespace: %v", err)
				framework.Failf("failed to create namespace: %v", err)
			}
		}

@ -72,44 +72,44 @@ var _ = Describe("CephFS Upgrade Testing", func() {
		// when we are done upgrading.
		cwd, err = os.Getwd()
		if err != nil {
			e2elog.Failf("failed to getwd: %v", err)
			framework.Failf("failed to getwd: %v", err)
		}
		deployVault(f.ClientSet, deployTimeout)
		err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
		if err != nil {
			e2elog.Failf("failed to upgrade csi: %v", err)
			framework.Failf("failed to upgrade csi: %v", err)
		}
		err = createConfigMap(cephFSDirPath, f.ClientSet, f)
		if err != nil {
			e2elog.Failf("failed to create configmap: %v", err)
			framework.Failf("failed to create configmap: %v", err)
		}
		var key string
		// create cephFS provisioner secret
		key, err = createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps())
		if err != nil {
			e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
			framework.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
		}
		err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key)
		if err != nil {
			e2elog.Failf("failed to create provisioner secret: %v", err)
			framework.Failf("failed to create provisioner secret: %v", err)
		}
		// create cephFS plugin secret
		key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps())
		if err != nil {
			e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
			framework.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
		}
		err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key)
		if err != nil {
			e2elog.Failf("failed to create node secret: %v", err)
			framework.Failf("failed to create node secret: %v", err)
		}

		err = createCephFSSnapshotClass(f)
		if err != nil {
			e2elog.Failf("failed to create snapshotclass: %v", err)
			framework.Failf("failed to create snapshotclass: %v", err)
		}
		err = createCephfsStorageClass(f.ClientSet, f, true, nil)
		if err != nil {
			e2elog.Failf("failed to create storageclass: %v", err)
			framework.Failf("failed to create storageclass: %v", err)
		}
	})
	AfterEach(func() {
@ -125,31 +125,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {
		logsCSIPods("app=csi-cephfsplugin", c)

		// log all details from the namespace where Ceph-CSI is deployed
		framework.DumpAllNamespaceInfo(c, cephCSINamespace)
		e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
	}
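The one change in this file that is not a logging swap: DumpAllNamespaceInfo moved from the framework package to the test/e2e/framework/debug subpackage. A minimal sketch of the new call (the wrapper name is illustrative):

package e2e

import (
	clientset "k8s.io/client-go/kubernetes"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)

// dumpCSINamespace logs all details from the namespace where Ceph-CSI
// is deployed; previously this was framework.DumpAllNamespaceInfo(c, ns).
func dumpCSINamespace(c clientset.Interface, ns string) {
	e2edebug.DumpAllNamespaceInfo(c, ns)
}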
	err = deleteConfigMap(cephFSDirPath)
	if err != nil {
		e2elog.Failf("failed to delete configmap: %v", err)
		framework.Failf("failed to delete configmap: %v", err)
	}
	err = c.CoreV1().
		Secrets(cephCSINamespace).
		Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{})
	if err != nil {
		e2elog.Failf("failed to delete provisioner secret: %v", err)
		framework.Failf("failed to delete provisioner secret: %v", err)
	}
	err = c.CoreV1().
		Secrets(cephCSINamespace).
		Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{})
	if err != nil {
		e2elog.Failf("failed to delete node secret: %v", err)
		framework.Failf("failed to delete node secret: %v", err)
	}
	err = deleteResource(cephFSExamplePath + "storageclass.yaml")
	if err != nil {
		e2elog.Failf("failed to delete storageclass: %v", err)
		framework.Failf("failed to delete storageclass: %v", err)
	}
	err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
	if err != nil {
		e2elog.Failf("failed to delete storageclass: %v", err)
		framework.Failf("failed to delete storageclass: %v", err)
	}
	deleteVault()
	if deployCephFS {
@ -158,7 +158,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
		err = deleteNamespace(c, cephCSINamespace)
		if err != nil {
			e2elog.Failf("failed to delete namespace: %v", err)
			framework.Failf("failed to delete namespace: %v", err)
		}
	}
}
@ -174,13 +174,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	By("checking provisioner deployment is running", func() {
		err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
		if err != nil {
			e2elog.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
			framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
		}
	})
	By("checking nodeplugin deamonset pods are running", func() {
		err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
		if err != nil {
			e2elog.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
			framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
		}
	})

@ -193,13 +193,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {

	pvc, err = loadPVC(pvcPath)
	if err != nil {
		e2elog.Failf("failed to load pvc: %v", err)
		framework.Failf("failed to load pvc: %v", err)
	}
	pvc.Namespace = f.UniqueName

	app, err = loadApp(appPath)
	if err != nil {
		e2elog.Failf("failed to load application: %v", err)
		framework.Failf("failed to load application: %v", err)
	}
	label[appKey] = appLabel
	app.Namespace = f.UniqueName
@ -208,12 +208,12 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
	err = createPVCAndApp("", f, pvc, app, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create pvc and application: %v", err)
		framework.Failf("failed to create pvc and application: %v", err)
	}
	var pv *v1.PersistentVolume
	_, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
	if err != nil {
		e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err)
		framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
	}

	opt := metav1.ListOptions{
@ -230,19 +230,19 @@ var _ = Describe("CephFS Upgrade Testing", func() {
		app.Namespace,
		&opt)
	if stdErr != "" {
		e2elog.Failf("failed to write data to a file %s", stdErr)
		framework.Failf("failed to write data to a file %s", stdErr)
	}

	// force an immediate write of all cached data to disk.
	_, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt)
	if stdErr != "" {
		e2elog.Failf("failed to sync data to a disk %s", stdErr)
		framework.Failf("failed to sync data to a disk %s", stdErr)
	}

	e2elog.Logf("Calculating checksum of %s", filePath)
	framework.Logf("Calculating checksum of %s", filePath)
	checkSum, err = calculateSHA512sum(f, app, filePath, &opt)
	if err != nil {
		e2elog.Failf("failed to calculate checksum: %v", err)
		framework.Failf("failed to calculate checksum: %v", err)
	}
	// Create snapshot of the pvc
	snapshotPath := cephFSExamplePath + "snapshot.yaml"
@ -252,31 +252,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
	err = createSnapshot(&snap, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create snapshot %v", err)
		framework.Failf("failed to create snapshot %v", err)
	}
	validateCephFSSnapshotCount(f, 1, defaultSubvolumegroup, pv)

	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete application: %v", err)
		framework.Failf("failed to delete application: %v", err)
	}
	deleteCephfsPlugin()

	// switch back to current changes.
	err = os.Chdir(cwd)
	if err != nil {
		e2elog.Failf("failed to d chdir: %v", err)
		framework.Failf("failed to d chdir: %v", err)
	}
	deployCephfsPlugin()

	err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
	if err != nil {
		e2elog.Failf("timeout waiting for upgraded deployment %s: %v", cephFSDeploymentName, err)
		framework.Failf("timeout waiting for upgraded deployment %s: %v", cephFSDeploymentName, err)
	}

	err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
	if err != nil {
		e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", cephFSDeamonSetName, err)
		framework.Failf("timeout waiting for upgraded daemonset %s: %v", cephFSDeamonSetName, err)
	}

	app.Labels = label
@ -284,7 +284,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	// an earlier release.
	err = createApp(f.ClientSet, app, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create application: %v", err)
		framework.Failf("failed to create application: %v", err)
	}
})

@ -294,13 +294,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	label := make(map[string]string)
	pvcClone, err = loadPVC(pvcClonePath)
	if err != nil {
		e2elog.Failf("failed to load pvc: %v", err)
		framework.Failf("failed to load pvc: %v", err)
	}
	pvcClone.Namespace = f.UniqueName
	pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
	appClone, err = loadApp(appClonePath)
	if err != nil {
		e2elog.Failf("failed to load application: %v", err)
		framework.Failf("failed to load application: %v", err)
	}
	label[appKey] = "validate-snap-cephfs"
	appClone.Namespace = f.UniqueName
@ -308,12 +308,12 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	appClone.Labels = label
	err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create pvc and application: %v", err)
		framework.Failf("failed to create pvc and application: %v", err)
	}
	var pv *v1.PersistentVolume
	_, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
	if err != nil {
		e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err)
		framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
	}

	opt := metav1.ListOptions{
@ -323,15 +323,15 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	testFilePath := filepath.Join(mountPath, "testClone")
	newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt)
	if err != nil {
		e2elog.Failf("failed to calculate checksum: %v", err)
		framework.Failf("failed to calculate checksum: %v", err)
	}
	if strings.Compare(newCheckSum, checkSum) != 0 {
		e2elog.Failf(
		framework.Failf(
			"The checksum of files did not match, expected %s received %s ",
			checkSum,
			newCheckSum)
	}
	e2elog.Logf("The checksum of files matched")
	framework.Logf("The checksum of files matched")

	// delete cloned pvc and pod
	err = deletePVCAndApp("", f, pvcClone, appClone)
@ -347,7 +347,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
	err = deleteSnapshot(&snap, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete snapshot %v", err)
		framework.Failf("failed to delete snapshot %v", err)
	}
	validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv)
})
@ -359,14 +359,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {

	pvcClone, err = loadPVC(pvcSmartClonePath)
	if err != nil {
		e2elog.Failf("failed to load pvc: %v", err)
		framework.Failf("failed to load pvc: %v", err)
	}
	pvcClone.Spec.DataSource.Name = pvc.Name
	pvcClone.Namespace = f.UniqueName
	pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
	appClone, err = loadApp(appSmartClonePath)
	if err != nil {
		e2elog.Failf("failed to load application: %v", err)
		framework.Failf("failed to load application: %v", err)
	}
	label[appKey] = "validate-snap-cephfs"
	appClone.Namespace = f.UniqueName
@ -374,7 +374,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	appClone.Labels = label
	err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create pvc and application: %v", err)
		framework.Failf("failed to create pvc and application: %v", err)
	}
	opt := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@ -383,21 +383,21 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	testFilePath := filepath.Join(mountPath, "testClone")
	newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt)
	if err != nil {
		e2elog.Failf("failed to calculate checksum: %v", err)
		framework.Failf("failed to calculate checksum: %v", err)
	}

	if strings.Compare(newCheckSum, checkSum) != 0 {
		e2elog.Failf(
		framework.Failf(
			"The checksum of files did not match, expected %s received %s",
			checkSum,
			newCheckSum)
	}
	e2elog.Logf("The checksum of files matched")
	framework.Logf("The checksum of files matched")

	// delete cloned pvc and pod
	err = deletePVCAndApp("", f, pvcClone, appClone)
	if err != nil {
		e2elog.Failf("failed to delete pvc and application: %v", err)
		framework.Failf("failed to delete pvc and application: %v", err)
	}
})

@ -411,40 +411,40 @@ var _ = Describe("CephFS Upgrade Testing", func() {
	}
	pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
	if err != nil {
		e2elog.Failf("failed to get pvc: %v", err)
		framework.Failf("failed to get pvc: %v", err)
	}

	// resize PVC
	err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to expand pvc: %v", err)
		framework.Failf("failed to expand pvc: %v", err)
	}
	// wait for application pod to come up after resize
	err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError)
	if err != nil {
		e2elog.Failf("timeout waiting for pod to be in running state: %v", err)
		framework.Failf("timeout waiting for pod to be in running state: %v", err)
	}
	// validate if resize is successful.
	err = checkDirSize(app, f, &opt, pvcExpandSize)
	if err != nil {
		e2elog.Failf("failed to check directory size: %v", err)
		framework.Failf("failed to check directory size: %v", err)
	}
})

By("delete pvc and app")
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
	e2elog.Failf("failed to delete pvc and application: %v", err)
	framework.Failf("failed to delete pvc and application: %v", err)
}
// delete cephFS provisioner secret
err = deleteCephUser(f, keyringCephFSProvisionerUsername)
if err != nil {
	e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
	framework.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
}
// delete cephFS plugin secret
err = deleteCephUser(f, keyringCephFSNodePluginUsername)
if err != nil {
	e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
	framework.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
}
})
})
@ -29,7 +29,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
|
||||
"k8s.io/pod-security-admission/api"
|
||||
)
|
||||
|
||||
@ -60,7 +60,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
if cephCSINamespace != defaultNs {
|
||||
err := createNamespace(c, cephCSINamespace)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create namespace: %v", err)
|
||||
framework.Failf("failed to create namespace: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -69,52 +69,52 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
var err error
|
||||
cwd, err = os.Getwd()
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to do getwd: %v", err)
|
||||
framework.Failf("failed to do getwd: %v", err)
|
||||
}
|
||||
|
||||
deployVault(f.ClientSet, deployTimeout)
|
||||
err = upgradeAndDeployCSI(upgradeVersion, "rbd")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to upgrade and deploy CSI: %v", err)
|
||||
framework.Failf("failed to upgrade and deploy CSI: %v", err)
|
||||
}
|
||||
err = createConfigMap(rbdDirPath, f.ClientSet, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create configmap: %v", err)
|
||||
framework.Failf("failed to create configmap: %v", err)
|
||||
}
|
||||
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create storageclass: %v", err)
|
||||
framework.Failf("failed to create storageclass: %v", err)
|
||||
}
|
||||
// create rbd provisioner secret
|
||||
key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", ""))
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err)
|
||||
framework.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err)
|
||||
}
|
||||
err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create provisioner secret: %v", err)
|
||||
framework.Failf("failed to create provisioner secret: %v", err)
|
||||
}
|
||||
// create rbd plugin secret
|
||||
key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", ""))
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err)
|
||||
framework.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err)
|
||||
}
|
||||
err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create node secret: %v", err)
|
||||
framework.Failf("failed to create node secret: %v", err)
|
||||
}
|
||||
err = createRBDSnapshotClass(f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create snapshotclass: %v", err)
|
||||
framework.Failf("failed to create snapshotclass: %v", err)
|
||||
}
|
||||
|
||||
err = createNodeLabel(f, nodeRegionLabel, regionValue)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create node label: %v", err)
|
||||
framework.Failf("failed to create node label: %v", err)
|
||||
}
|
||||
err = createNodeLabel(f, nodeZoneLabel, zoneValue)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create node label: %v", err)
|
||||
framework.Failf("failed to create node label: %v", err)
|
||||
}
|
||||
})
|
||||
AfterEach(func() {
|
||||
@ -130,32 +130,32 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
logsCSIPods("app=csi-rbdplugin", c)
|
||||
|
||||
// log all details from the namespace where Ceph-CSI is deployed
|
||||
framework.DumpAllNamespaceInfo(c, cephCSINamespace)
|
||||
e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
|
||||
}
|
||||
|
||||
err := deleteConfigMap(rbdDirPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete configmap: %v", err)
|
||||
framework.Failf("failed to delete configmap: %v", err)
|
||||
}
|
||||
err = c.CoreV1().
|
||||
Secrets(cephCSINamespace).
|
||||
Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete provisioner secret: %v", err)
|
||||
framework.Failf("failed to delete provisioner secret: %v", err)
|
||||
}
|
||||
err = c.CoreV1().
|
||||
Secrets(cephCSINamespace).
|
||||
Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete node secret: %v", err)
|
||||
framework.Failf("failed to delete node secret: %v", err)
|
||||
}
|
||||
err = deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete storageclass: %v", err)
|
||||
framework.Failf("failed to delete storageclass: %v", err)
|
||||
}
|
||||
err = deleteResource(rbdExamplePath + "snapshotclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete snapshotclass: %v", err)
|
||||
framework.Failf("failed to delete snapshotclass: %v", err)
|
||||
}
|
||||
deleteVault()
|
||||
if deployRBD {
|
||||
@ -163,17 +163,17 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
if cephCSINamespace != defaultNs {
|
||||
err = deleteNamespace(c, cephCSINamespace)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete namespace: %v", err)
|
||||
framework.Failf("failed to delete namespace: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
err = deleteNodeLabel(c, nodeRegionLabel)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete node label: %v", err)
|
||||
framework.Failf("failed to delete node label: %v", err)
|
||||
}
|
||||
err = deleteNodeLabel(c, nodeZoneLabel)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete node label: %v", err)
|
||||
framework.Failf("failed to delete node label: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -189,14 +189,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
By("checking provisioner deployment is running", func() {
|
||||
err := waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err)
|
||||
framework.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err)
|
||||
}
|
||||
})
|
||||
|
||||
By("checking nodeplugin deamonset pods are running", func() {
|
||||
err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
e2elog.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err)
|
||||
framework.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -208,13 +208,13 @@ var _ = Describe("RBD Upgrade Testing", func() {

 pvc, err = loadPVC(pvcPath)
 if err != nil {
-e2elog.Failf("failed to load pvc: %v", err)
+framework.Failf("failed to load pvc: %v", err)
 }
 pvc.Namespace = f.UniqueName

 app, err = loadApp(appPath)
 if err != nil {
-e2elog.Failf("failed to load application: %v", err)
+framework.Failf("failed to load application: %v", err)
 }
 label[appKey] = appLabel
 app.Namespace = f.UniqueName
@@ -222,7 +222,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 err = createPVCAndApp("", f, pvc, app, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create pvc: %v", err)
+framework.Failf("failed to create pvc: %v", err)
 }
 opt := metav1.ListOptions{
 LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@@ -238,22 +238,22 @@ var _ = Describe("RBD Upgrade Testing", func() {
 app.Namespace,
 &opt)
 if stdErr != "" {
-e2elog.Failf("failed to write data to a file %s", stdErr)
+framework.Failf("failed to write data to a file %s", stdErr)
 }

 // force an immediate write of all cached data to disk.
 _, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt)
 if stdErr != "" {
-e2elog.Failf("failed to sync data to a disk %s", stdErr)
+framework.Failf("failed to sync data to a disk %s", stdErr)
 }

 opt = metav1.ListOptions{
 LabelSelector: fmt.Sprintf("app=%s", appLabel),
 }
-e2elog.Logf("Calculating checksum of %s", filePath)
+framework.Logf("Calculating checksum of %s", filePath)
 checkSum, err = calculateSHA512sum(f, app, filePath, &opt)
 if err != nil {
-e2elog.Failf("failed to calculate checksum: %v", err)
+framework.Failf("failed to calculate checksum: %v", err)
 }

 // Create snapshot of the pvc
@@ -264,30 +264,30 @@ var _ = Describe("RBD Upgrade Testing", func() {
 snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
 err = createSnapshot(&snap, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create snapshot %v", err)
+framework.Failf("failed to create snapshot %v", err)
 }

 err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to delete application: %v", err)
+framework.Failf("failed to delete application: %v", err)
 }
 deleteRBDPlugin()

 err = os.Chdir(cwd)
 if err != nil {
-e2elog.Failf("failed to change directory: %v", err)
+framework.Failf("failed to change directory: %v", err)
 }

 deployRBDPlugin()

 err = waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
 if err != nil {
-e2elog.Failf("timeout waiting for upgraded deployment %s: %v", rbdDeploymentName, err)
+framework.Failf("timeout waiting for upgraded deployment %s: %v", rbdDeploymentName, err)
 }

 err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
 if err != nil {
-e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", rbdDaemonsetName, err)
+framework.Failf("timeout waiting for upgraded daemonset %s: %v", rbdDaemonsetName, err)
 }

 // validate if the app gets bound to a pvc created by
@@ -295,7 +295,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 app.Labels = label
 err = createApp(f.ClientSet, app, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create application: %v", err)
+framework.Failf("failed to create application: %v", err)
 }
 })
@@ -305,14 +305,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
 label := make(map[string]string)
 pvcClone, err := loadPVC(pvcClonePath)
 if err != nil {
-e2elog.Failf("failed to load pvc: %v", err)
+framework.Failf("failed to load pvc: %v", err)
 }
 pvcClone.Namespace = f.UniqueName
 pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 pvcClone.Spec.DataSource.Name = "rbd-pvc-snapshot"
 appClone, err := loadApp(appClonePath)
 if err != nil {
-e2elog.Failf("failed to load application: %v", err)
+framework.Failf("failed to load application: %v", err)
 }
 label[appKey] = "validate-snap-clone"
 appClone.Namespace = f.UniqueName
@@ -320,7 +320,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 appClone.Labels = label
 err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create pvc: %v", err)
+framework.Failf("failed to create pvc: %v", err)
 }
 opt := metav1.ListOptions{
 LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@@ -329,20 +329,20 @@ var _ = Describe("RBD Upgrade Testing", func() {
 testFilePath := filepath.Join(mountPath, "testClone")
 newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt)
 if err != nil {
-e2elog.Failf("failed to calculate checksum: %v", err)
+framework.Failf("failed to calculate checksum: %v", err)
 }
 if strings.Compare(newCheckSum, checkSum) != 0 {
-e2elog.Failf(
+framework.Failf(
 "The checksum of files did not match, expected %s received %s",
 checkSum,
 newCheckSum)
 }
-e2elog.Logf("The checksum of files matched")
+framework.Logf("The checksum of files matched")

 // delete cloned pvc and pod
 err = deletePVCAndApp("", f, pvcClone, appClone)
 if err != nil {
-e2elog.Failf("failed to delete pvc and application: %v", err)
+framework.Failf("failed to delete pvc and application: %v", err)
 }
 })
@@ -353,14 +353,14 @@ var _ = Describe("RBD Upgrade Testing", func() {

 pvcClone, err := loadPVC(pvcSmartClonePath)
 if err != nil {
-e2elog.Failf("failed to load pvc: %v", err)
+framework.Failf("failed to load pvc: %v", err)
 }
 pvcClone.Spec.DataSource.Name = pvc.Name
 pvcClone.Namespace = f.UniqueName
 pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 appClone, err := loadApp(appSmartClonePath)
 if err != nil {
-e2elog.Failf("failed to load application: %v", err)
+framework.Failf("failed to load application: %v", err)
 }
 label[appKey] = "validate-clone"
 appClone.Namespace = f.UniqueName
@@ -368,7 +368,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 appClone.Labels = label
 err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create pvc: %v", err)
+framework.Failf("failed to create pvc: %v", err)
 }
 opt := metav1.ListOptions{
 LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@@ -377,20 +377,20 @@ var _ = Describe("RBD Upgrade Testing", func() {
 testFilePath := filepath.Join(mountPath, "testClone")
 newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt)
 if err != nil {
-e2elog.Failf("failed to calculate checksum: %v", err)
+framework.Failf("failed to calculate checksum: %v", err)
 }
 if strings.Compare(newCheckSum, checkSum) != 0 {
-e2elog.Failf(
+framework.Failf(
 "The checksum of files did not match, expected %s received %s",
 checkSum,
 newCheckSum)
 }
-e2elog.Logf("The checksum of files matched")
+framework.Logf("The checksum of files matched")

 // delete cloned pvc and pod
 err = deletePVCAndApp("", f, pvcClone, appClone)
 if err != nil {
-e2elog.Failf("failed to delete pvc and application: %v", err)
+framework.Failf("failed to delete pvc and application: %v", err)
 }
 })
@@ -405,41 +405,41 @@ var _ = Describe("RBD Upgrade Testing", func() {
 var err error
 pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
 if err != nil {
-e2elog.Failf("failed to get pvc: %v", err)
+framework.Failf("failed to get pvc: %v", err)
 }

 // resize PVC
 err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to expand pvc: %v", err)
+framework.Failf("failed to expand pvc: %v", err)
 }
 // wait for application pod to come up after resize
 err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError)
 if err != nil {
-e2elog.Failf("timeout waiting for pod to be in running state: %v", err)
+framework.Failf("timeout waiting for pod to be in running state: %v", err)
 }
 // validate if resize is successful.
 err = checkDirSize(app, f, &opt, pvcExpandSize)
 if err != nil {
-e2elog.Failf("failed to check directory size: %v", err)
+framework.Failf("failed to check directory size: %v", err)
 }
 })

 By("delete pvc and app", func() {
 err := deletePVCAndApp("", f, pvc, app)
 if err != nil {
-e2elog.Failf("failed to delete pvc and application: %v", err)
+framework.Failf("failed to delete pvc and application: %v", err)
 }
 })
 // delete RBD provisioner secret
 err := deleteCephUser(f, keyringRBDProvisionerUsername)
 if err != nil {
-e2elog.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err)
+framework.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err)
 }
 // delete RBD plugin secret
 err = deleteCephUser(f, keyringRBDNodePluginUsername)
 if err != nil {
-e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err)
+framework.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err)
 }
 })
 })
138	e2e/utils.go
@@ -41,7 +41,7 @@ import (
 utilyaml "k8s.io/apimachinery/pkg/util/yaml"
 "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 )

 /* #nosec:G101, values not credentials, just a reference to the location.*/
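With the 1.26 rebase the separate test/e2e/framework/log package is gone: its Logf and Failf helpers are called on the framework package directly, and the kubectl helpers move into the e2ekubectl sub-package. A minimal sketch of a helper written against the new layout (the function itself is illustrative, not part of this commit):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// applyManifest pipes a YAML manifest to `kubectl apply -f -` and fails
// the running spec on error.
func applyManifest(ns, yaml string) {
	out, err := e2ekubectl.RunKubectlInput(ns, yaml, "apply", "-f", "-")
	if err != nil {
		// framework.Failf replaces the old e2elog.Failf call sites.
		framework.Failf("kubectl apply failed: %v", err)
	}
	framework.Logf("kubectl apply output: %s", out)
}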
@@ -218,11 +218,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
 if err != nil {
 if !strings.Contains(err.Error(), exitOneErr) {
-e2elog.Failf("failed to execute rados command '%s' : err=%v stdErr=%s", cmd, err, stdErr)
+framework.Failf("failed to execute rados command '%s' : err=%v stdErr=%s", cmd, err, stdErr)
 }
 }
 if stdErr != "" {
-e2elog.Failf("failed to execute rados command '%s' : stdErr=%s", cmd, stdErr)
+framework.Failf("failed to execute rados command '%s' : stdErr=%s", cmd, stdErr)
 }
 err = compareStdoutWithCount(stdOut, count)
 if err == nil {
@@ -232,10 +232,10 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 if strings.Contains(err.Error(), "expected omap object count") {
 stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace)
 if err == nil {
-e2elog.Logf("additional debug info: rados ls command output: %s, stdErr: %s", stdOut, stdErr)
+framework.Logf("additional debug info: rados ls command output: %s, stdErr: %s", stdOut, stdErr)
 }
 }
-e2elog.Failf("%v", saveErr)
+framework.Failf("%v", saveErr)
 }
 }
 }
@@ -320,11 +320,11 @@ func getSecret(path string) (v1.Secret, error) {
 func deleteResource(scPath string) error {
 data, err := replaceNamespaceInTemplate(scPath)
 if err != nil {
-e2elog.Logf("failed to read content from %s %v", scPath, err)
+framework.Logf("failed to read content from %s %v", scPath, err)
 }
 err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true")
 if err != nil {
-e2elog.Logf("failed to delete %s %v", scPath, err)
+framework.Logf("failed to delete %s %v", scPath, err)
 }

 return err
@@ -789,21 +789,21 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.
 // write data in PVC
 err := writeDataInPod(app, opt, f)
 if err != nil {
-e2elog.Logf("failed to write data in the pod: %v", err)
+framework.Logf("failed to write data in the pod: %v", err)

 return "", err
 }

 checkSum, err := calculateSHA512sum(f, app, filePath, opt)
 if err != nil {
-e2elog.Logf("failed to calculate checksum: %v", err)
+framework.Logf("failed to calculate checksum: %v", err)

 return checkSum, err
 }

 err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to delete pod: %v", err)
+framework.Failf("failed to delete pod: %v", err)
 }

 return checkSum, nil
@@ -824,18 +824,18 @@ func validatePVCClone(
 chErrs := make([]error, totalCount)
 pvc, err := loadPVC(sourcePvcPath)
 if err != nil {
-e2elog.Failf("failed to load PVC: %v", err)
+framework.Failf("failed to load PVC: %v", err)
 }

 label := make(map[string]string)
 pvc.Namespace = f.UniqueName
 err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create PVC: %v", err)
+framework.Failf("failed to create PVC: %v", err)
 }
 app, err := loadApp(sourceAppPath)
 if err != nil {
-e2elog.Failf("failed to load app: %v", err)
+framework.Failf("failed to load app: %v", err)
 }
 label[appKey] = appLabel
 app.Namespace = f.UniqueName
@@ -848,19 +848,19 @@ func validatePVCClone(
 checkSum := ""
 pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
 if err != nil {
-e2elog.Failf("failed to get pvc %v", err)
+framework.Failf("failed to get pvc %v", err)
 }
 if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
 checkSum, err = writeDataAndCalChecksum(app, &opt, f)
 if err != nil {
-e2elog.Failf("failed to calculate checksum: %v", err)
+framework.Failf("failed to calculate checksum: %v", err)
 }
 }
 // validate created backend rbd images
 validateRBDImageCount(f, 1, defaultRBDPool)
 pvcClone, err := loadPVC(clonePvcPath)
 if err != nil {
-e2elog.Failf("failed to load PVC: %v", err)
+framework.Failf("failed to load PVC: %v", err)
 }
 pvcClone.Spec.DataSource.Name = pvc.Name
 pvcClone.Namespace = f.UniqueName
@@ -870,7 +870,7 @@ func validatePVCClone(

 appClone, err := loadApp(clonePvcAppPath)
 if err != nil {
-e2elog.Failf("failed to load application: %v", err)
+framework.Failf("failed to load application: %v", err)
 }
 appClone.Namespace = f.UniqueName
 wg.Add(totalCount)
@@ -902,7 +902,7 @@ func validatePVCClone(
 // check new passphrase created
 stdOut, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle)
 if stdOut != "" {
-e2elog.Logf("successfully read the passphrase from vault: %s", stdOut)
+framework.Logf("successfully read the passphrase from vault: %s", stdOut)
 }
 if stdErr != "" {
 wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
@@ -913,14 +913,14 @@ func validatePVCClone(
 if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem && wgErrs[n] == nil {
 filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
 var checkSumClone string
-e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
+framework.Logf("Calculating checksum clone for filepath %s", filePath)
 checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
-e2elog.Logf("checksum for clone is %s", checkSumClone)
+framework.Logf("checksum for clone is %s", checkSumClone)
 if chErrs[n] != nil {
-e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
+framework.Logf("Failed calculating checksum clone %s", chErrs[n])
 }
 if checkSumClone != checkSum {
-e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
+framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
 }
 }
 if wgErrs[n] == nil && validatePVC != nil && kms != noKMS {
@@ -935,23 +935,23 @@ func validatePVCClone(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to create PVC (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to create PVC (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("creating PVCs failed, %d errors were logged", failed)
+framework.Failf("creating PVCs failed, %d errors were logged", failed)
 }

 for i, err := range chErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
+framework.Failf("calculating checksum failed, %d errors were logged", failed)
 }
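The loops above deliberately aggregate errors: framework.Failf aborts the spec on the first call, so each worker's error is first logged with framework.Logf and the test fails exactly once at the end, as the inline comment notes. A self-contained sketch of the same pattern (the helper name is illustrative, not part of the commit):

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// failIfAnyErrors logs every collected error before failing, because a
// direct framework.Failf on the first error would hide the rest.
func failIfAnyErrors(action string, errs []error) {
	failed := 0
	for i, err := range errs {
		if err != nil {
			framework.Logf("%s (%d): %v", action, i, err)
			failed++
		}
	}
	if failed != 0 {
		framework.Failf("%s failed, %d errors were logged", action, failed)
	}
}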
 // total images in cluster is 1 parent rbd image+ total
@@ -961,7 +961,7 @@ func validatePVCClone(
 // delete parent pvc
 err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to delete PVC: %v", err)
+framework.Failf("failed to delete PVC: %v", err)
 }

 totalCloneCount = totalCount + totalCount
@@ -1013,12 +1013,12 @@ func validatePVCClone(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
+framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
 }

 validateRBDImageCount(f, 0, defaultRBDPool)
@@ -1037,28 +1037,28 @@ func validatePVCSnapshot(
 chErrs := make([]error, totalCount)
 err := createRBDSnapshotClass(f)
 if err != nil {
-e2elog.Failf("failed to create storageclass: %v", err)
+framework.Failf("failed to create storageclass: %v", err)
 }
 defer func() {
 err = deleteRBDSnapshotClass()
 if err != nil {
-e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
+framework.Failf("failed to delete VolumeSnapshotClass: %v", err)
 }
 }()

 pvc, err := loadPVC(pvcPath)
 if err != nil {
-e2elog.Failf("failed to load PVC: %v", err)
+framework.Failf("failed to load PVC: %v", err)
 }
 label := make(map[string]string)
 pvc.Namespace = f.UniqueName
 err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to create PVC: %v", err)
+framework.Failf("failed to create PVC: %v", err)
 }
 app, err := loadApp(appPath)
 if err != nil {
-e2elog.Failf("failed to load app: %v", err)
+framework.Failf("failed to load app: %v", err)
 }
 // write data in PVC
 label[appKey] = appLabel
@@ -1070,7 +1070,7 @@ func validatePVCSnapshot(
 app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
 checkSum, err := writeDataAndCalChecksum(app, &opt, f)
 if err != nil {
-e2elog.Failf("failed to calculate checksum: %v", err)
+framework.Failf("failed to calculate checksum: %v", err)
 }
 validateRBDImageCount(f, 1, defaultRBDPool)
 snap := getSnapshot(snapshotPath)
@@ -1110,23 +1110,23 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("creating snapshots failed, %d errors were logged", failed)
+framework.Failf("creating snapshots failed, %d errors were logged", failed)
 }

 // total images in cluster is 1 parent rbd image+ total snaps
 validateRBDImageCount(f, totalCount+1, defaultRBDPool)
 pvcClone, err := loadPVC(pvcClonePath)
 if err != nil {
-e2elog.Failf("failed to load PVC: %v", err)
+framework.Failf("failed to load PVC: %v", err)
 }
 appClone, err := loadApp(appClonePath)
 if err != nil {
-e2elog.Failf("failed to load application: %v", err)
+framework.Failf("failed to load application: %v", err)
 }
 pvcClone.Namespace = f.UniqueName
 appClone.Namespace = f.UniqueName
@@ -1170,14 +1170,14 @@ func validatePVCSnapshot(
 if wgErrs[n] == nil {
 filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
 var checkSumClone string
-e2elog.Logf("calculating checksum clone for filepath %s", filePath)
+framework.Logf("calculating checksum clone for filepath %s", filePath)
 checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
-e2elog.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name)
+framework.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name)
 if chErrs[n] != nil {
-e2elog.Logf("failed to calculte checksum for clone: %s", chErrs[n])
+framework.Logf("failed to calculte checksum for clone: %s", chErrs[n])
 }
 if checkSumClone != checkSum {
-e2elog.Logf(
+framework.Logf(
 "checksum value didn't match. checksum=%s and checksumclone=%s",
 checkSum,
 checkSumClone)
@@ -1191,23 +1191,23 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed)
+framework.Failf("creating PVCs and applications failed, %d errors were logged", failed)
 }

 for i, err := range chErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
+framework.Failf("calculating checksum failed, %d errors were logged", failed)
 }
 // total images in cluster is 1 parent rbd image+ total
 // snaps+ total clones
@@ -1228,12 +1228,12 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
+framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
 }

 // total images in cluster is 1 parent rbd image+ total
@@ -1259,12 +1259,12 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed)
+framework.Failf("creating PVCs and applications failed, %d errors were logged", failed)
 }

 // total images in cluster is 1 parent rbd image+ total
@@ -1274,7 +1274,7 @@ func validatePVCSnapshot(
 // delete parent pvc
 err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
 if err != nil {
-e2elog.Failf("failed to delete PVC: %v", err)
+framework.Failf("failed to delete PVC: %v", err)
 }

 // total images in cluster is total snaps+ total clones
@@ -1325,12 +1325,12 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("deleting snapshots failed, %d errors were logged", failed)
+framework.Failf("deleting snapshots failed, %d errors were logged", failed)
 }

 validateRBDImageCount(f, totalCount, defaultRBDPool)
@@ -1349,12 +1349,12 @@ func validatePVCSnapshot(
 for i, err := range wgErrs {
 if err != nil {
 // not using Failf() as it aborts the test and does not log other errors
-e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
+framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
 failed++
 }
 }
 if failed != 0 {
-e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
+framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
 }

 // validate created backend rbd images
@@ -1436,7 +1436,7 @@ func validateController(
 pv.ResourceVersion = ""
 err = createPVCAndPV(f.ClientSet, pvc, pv)
 if err != nil {
-e2elog.Failf("failed to create PVC or PV: %v", err)
+framework.Failf("failed to create PVC or PV: %v", err)
 }
 // bind PVC to application
 app, err := loadApp(appPath)
@@ -1496,7 +1496,7 @@ func validateController(
 func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool {
 v, err := c.Discovery().ServerVersion()
 if err != nil {
-e2elog.Failf("failed to get server version: %v", err)
+framework.Failf("failed to get server version: %v", err)
 // Failf() marks the case as failure, and returns from the
 // Go-routine that runs the case. This function will not have a
 // return value.
@@ -1514,7 +1514,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
 t := time.Duration(timeout) * time.Minute
 start := time.Now()

-e2elog.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete)
+framework.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete)

 return wait.PollImmediate(poll, t, func() (bool, error) {
 j, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{})
@@ -1531,7 +1531,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
 return true, nil
 }

-e2elog.Logf(
+framework.Logf(
 "Job %s/%s has not completed yet (%d seconds elapsed)",
 ns, job, int(time.Since(start).Seconds()))
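The polling idiom above recurs throughout e2e/utils.go: wait.PollImmediate from k8s.io/apimachinery/pkg/util/wait runs the condition function immediately and then once per interval until it returns true, a hard error, or the timeout expires. A minimal self-contained sketch of the pattern (the poll interval and the helper name are assumptions standing in for the repo's own definitions):

package e2e

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

const poll = 2 * time.Second // assumed interval; the repo defines its own

// waitForJobExists polls until the Job object is visible via the API.
func waitForJobExists(c kubernetes.Interface, ns, job string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{})
		if err != nil {
			// returning (false, nil) keeps polling; (false, err) would abort
			return false, nil
		}
		return true, nil
	})
}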
@@ -1561,7 +1561,7 @@ func (ka kubectlAction) String() string {
 // no error occurred, or the timeout passed.
 func retryKubectlInput(namespace string, action kubectlAction, data string, t int, args ...string) error {
 timeout := time.Duration(t) * time.Minute
-e2elog.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
+framework.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
 start := time.Now()

 return wait.PollImmediate(poll, timeout, func() (bool, error) {
@@ -1571,7 +1571,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
 }
 cmd = append(cmd, []string{string(action), "-f", "-"}...)

-_, err := framework.RunKubectlInput(namespace, data, cmd...)
+_, err := e2ekubectl.RunKubectlInput(namespace, data, cmd...)
 if err != nil {
 if isRetryableAPIError(err) {
 return false, nil
@@ -1582,7 +1582,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
 if action == kubectlDelete && isNotFoundCLIError(err) {
 return true, nil
 }
-e2elog.Logf(
+framework.Logf(
 "will run kubectl (%s) args (%s) again (%d seconds elapsed)",
 action,
 args,
@@ -1600,7 +1600,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
 // occurred, or the timeout passed.
 func retryKubectlFile(namespace string, action kubectlAction, filename string, t int, args ...string) error {
 timeout := time.Duration(t) * time.Minute
-e2elog.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args)
+framework.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args)
 start := time.Now()

 return wait.PollImmediate(poll, timeout, func() (bool, error) {
@@ -1610,7 +1610,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 }
 cmd = append(cmd, []string{string(action), "-f", filename}...)

-_, err := framework.RunKubectl(namespace, cmd...)
+_, err := e2ekubectl.RunKubectl(namespace, cmd...)
 if err != nil {
 if isRetryableAPIError(err) {
 return false, nil
@@ -1621,7 +1621,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 if action == kubectlDelete && isNotFoundCLIError(err) {
 return true, nil
 }
-e2elog.Logf(
+framework.Logf(
 "will run kubectl (%s -f %q args %s) again (%d seconds elapsed)",
 action,
 filename,
@@ -1642,11 +1642,11 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
 timeout := time.Duration(t) * time.Minute
 args = append([]string{string(action)}, args...)
-e2elog.Logf("waiting for kubectl (%s args) to finish", args)
+framework.Logf("waiting for kubectl (%s args) to finish", args)
 start := time.Now()

 return wait.PollImmediate(poll, timeout, func() (bool, error) {
-_, err := framework.RunKubectl(namespace, args...)
+_, err := e2ekubectl.RunKubectl(namespace, args...)
 if err != nil {
 if isRetryableAPIError(err) {
 return false, nil
@@ -1657,7 +1657,7 @@ func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...str
 if action == kubectlDelete && isNotFoundCLIError(err) {
 return true, nil
 }
-e2elog.Logf(
+framework.Logf(
 "will run kubectl (%s) again (%d seconds elapsed)",
 args,
 int(time.Since(start).Seconds()))
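These retry helpers now shell out through the e2ekubectl sub-package instead of the framework package; RunKubectl and RunKubectlInput keep the same signatures, so only the import alias and the call sites change. A minimal sketch of both calls under the new import (the manifest content and function name are placeholders):

package e2e

import (
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// kubectlExamples shows the two call shapes migrated in this commit.
func kubectlExamples(ns string) error {
	// plain invocation: kubectl -n <ns> get pods
	if _, err := e2ekubectl.RunKubectl(ns, "get", "pods"); err != nil {
		return err
	}
	// stdin invocation: kubectl -n <ns> apply -f -
	manifest := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n"
	_, err := e2ekubectl.RunKubectlInput(ns, manifest, "apply", "-f", "-")
	return err
}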