mirror of https://github.com/ceph/ceph-csi.git
e2e: rework on E2E framework
Rework the E2E framework for better code organization, and add more helpful logs for debugging. Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
parent 3ea22bc5a8
commit b4693dcffe
1041 e2e/cephfs.go (file diff suppressed because it is too large)
131 e2e/cephfs_helper.go (new file)
@@ -0,0 +1,131 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	adminUser = "admin"
)

// validateSubvolumegroup validates whether subvolumegroup is present.
func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to getpath for subvolumegroup %s with error %v", subvolgrp, stdErr)
	}
	expectedGrpPath := "/volumes/" + subvolgrp
	stdOut = strings.TrimSpace(stdOut)
	if stdOut != expectedGrpPath {
		return fmt.Errorf("unexpected group path %s, expected %s", stdOut, expectedGrpPath)
	}
	return nil
}

func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, clusterID string) error {
	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
	sc, err := getStorageClass(scPath)
	if err != nil {
		return err
	}
	sc.Parameters["fsName"] = "myfs"
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephfsProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephfsProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephfsNodePluginSecretName

	if enablePool {
		sc.Parameters["pool"] = "myfs-data0"
	}
	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")
	if clusterID != "" {
		fsID = clusterID
	}
	sc.Namespace = cephCSINamespace
	sc.Parameters["clusterID"] = fsID
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) error {
	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
	sc, err := getSecret(scPath)
	if err != nil {
		return err
	}
	adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting admin key %v", stdErr)
	}
	sc.StringData["adminID"] = adminUser
	sc.StringData["adminKey"] = adminKey
	delete(sc.StringData, "userID")
	delete(sc.StringData, "userKey")
	sc.Namespace = cephCSINamespace
	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error deleting backing volume %s %v", imageData.imageName, stdErr)
	}
	return nil
}

type cephfsSubVolume struct {
	Name string `json:"name"`
}

func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string) ([]cephfsSubVolume, error) {
	var subVols []cephfsSubVolume
	stdout, stdErr, err := execCommandInToolBoxPod(f, fmt.Sprintf("ceph fs subvolume ls %s --group_name=%s --format=json", filesystem, groupname), rookNamespace)
	if err != nil {
		return subVols, err
	}
	if stdErr != "" {
		return subVols, fmt.Errorf("error listing subvolumes %v", stdErr)
	}

	err = json.Unmarshal([]byte(stdout), &subVols)
	if err != nil {
		return subVols, err
	}
	return subVols, nil
}
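A minimal usage sketch (not part of the commit, assuming the package-level f, subvolumegroup, and e2elog that the e2e suite defines elsewhere) showing how the CephFS helpers above compose in a test:

// Sketch: confirm the configured subvolumegroup exists, then list the
// subvolumes CSI created in it. The wiring is illustrative only.
func exerciseCephFSHelpers(f *framework.Framework) error {
	if err := validateSubvolumegroup(f, subvolumegroup); err != nil {
		return err
	}
	subVols, err := listCephFSSubVolumes(f, "myfs", subvolumegroup)
	if err != nil {
		return err
	}
	e2elog.Logf("found %d subvolumes in group %s", len(subVols), subvolumegroup)
	return nil
}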
120 e2e/configmap.go (new file)
@@ -0,0 +1,120 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/ceph/ceph-csi/internal/util"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func deleteConfigMap(pluginPath string) error {
	path := pluginPath + configMap
	_, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns)
	return err
}

func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Framework) error {
	path := pluginPath + configMap
	cm := v1.ConfigMap{}
	err := unmarshal(path, &cm)
	if err != nil {
		return err
	}

	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")
	// get mon list
	mons, err := getMons(rookNamespace, c)
	if err != nil {
		return err
	}
	conmap := []util.ClusterInfo{{
		ClusterID:      fsID,
		Monitors:       mons,
		RadosNamespace: radosNamespace,
	}}
	if upgradeTesting {
		subvolumegroup = "csi"
	}
	conmap[0].CephFS.SubvolumeGroup = subvolumegroup
	data, err := json.Marshal(conmap)
	if err != nil {
		return err
	}
	cm.Data["config.json"] = string(data)
	cm.Namespace = cephCSINamespace
	// if the configmap is present, update it; during cephcsi helm chart
	// deployment an empty configmap gets created, and we need to override it
	_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{})
	if err == nil {
		_, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
		if updateErr != nil {
			return updateErr
		}
	}
	if apierrs.IsNotFound(err) {
		_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
	}

	return err
}

// createCustomConfigMap provides multiple clusters information.
func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) error {
	path := pluginPath + configMap
	cm := v1.ConfigMap{}
	err := unmarshal(path, &cm)
	if err != nil {
		return err
	}
	// get mon list
	mons, err := getMons(rookNamespace, c)
	if err != nil {
		return err
	}
	// get clusterIDs
	var clusterID []string
	for key := range subvolgrpInfo {
		clusterID = append(clusterID, key)
	}
	conmap := []util.ClusterInfo{
		{
			ClusterID: clusterID[0],
			Monitors:  mons,
		},
		{
			ClusterID: clusterID[1],
			Monitors:  mons,
		}}
	for i := 0; i < len(subvolgrpInfo); i++ {
		conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]
	}
	data, err := json.Marshal(conmap)
	if err != nil {
		return err
	}
	cm.Data["config.json"] = string(data)
	cm.Namespace = cephCSINamespace
	// since a configmap is already created, update the existing configmap
	_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
	return err
}
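Note that createCustomConfigMap expects exactly two entries in subvolgrpInfo, since it indexes clusterID[0] and clusterID[1]. A hedged caller sketch (not part of the commit), with placeholder IDs where the real tests substitute fsid-derived values:

// Sketch: map each cluster ID to its subvolumegroup and write both into
// the CSI configmap. The IDs below are placeholders, not real fsids.
subvolgrpInfo := map[string]string{
	"cluster-id-1": "subvolgrp1",
	"cluster-id-2": "subvolgrp2",
}
if err := createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo); err != nil {
	e2elog.Failf("failed to create custom configmap with error %v", err)
}

Since Go map iteration order is unspecified, which ID lands in conmap[0] is nondeterministic; both entries share the same monitor list, so the tests do not depend on that order.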
75 e2e/namespace.go (new file)
@@ -0,0 +1,75 @@
package e2e

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

func createNamespace(c kubernetes.Interface, name string) error {
	timeout := time.Duration(deployTimeout) * time.Minute
	ns := &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	_, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
	if err != nil && !apierrs.IsAlreadyExists(err) {
		return err
	}

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting namespace: '%s': %v", name, err)
			if apierrs.IsNotFound(err) {
				return false, nil
			}
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
}

func deleteNamespace(c kubernetes.Interface, name string) error {
	timeout := time.Duration(deployTimeout) * time.Minute
	err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil && !apierrs.IsNotFound(err) {
		return err
	}
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			e2elog.Logf("Error getting namespace: '%s': %v", name, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return false, nil
	})
}

func replaceNamespaceInTemplate(filePath string) (string, error) {
	read, err := ioutil.ReadFile(filePath)
	if err != nil {
		return "", err
	}
	return strings.ReplaceAll(string(read), "namespace: default", fmt.Sprintf("namespace: %s", cephCSINamespace)), nil
}
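A sketch (mirroring the upgrade suites later in this commit) of pairing these helpers around a test run when a non-default namespace is used; c, cephCSINamespace, and defaultNs are suite-level variables:

// Sketch: create the namespace before the suite and tear it down after.
if cephCSINamespace != defaultNs {
	if err := createNamespace(c, cephCSINamespace); err != nil {
		e2elog.Failf("failed to create namespace with error %v", err)
	}
}
// ... run the tests ...
if cephCSINamespace != defaultNs {
	if err := deleteNamespace(c, cephCSINamespace); err != nil {
		e2elog.Failf("failed to delete namespace with error %v", err)
	}
}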
44 e2e/node.go (new file)
@@ -0,0 +1,44 @@
package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error {
	// NOTE: This makes all nodes (in a multi-node setup) in the test take
	// the same label values, which is fine for the test.
	nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
	}
	return nil
}

func deleteNodeLabel(c kubernetes.Interface, labelKey string) error {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
	}
	return nil
}

func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) error {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
	}
	return nil
}
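A sketch of the label round-trip the topology tests rely on; nodeRegionLabel and regionValue are package-level values defined elsewhere in the suite:

// Sketch: apply a region label to every node, verify it, then remove it.
if err := createNodeLabel(f, nodeRegionLabel, regionValue); err != nil {
	e2elog.Failf("failed to create node label with error %v", err)
}
if err := checkNodeHasLabel(f.ClientSet, nodeRegionLabel, regionValue); err != nil {
	e2elog.Failf("node label check failed with error %v", err)
}
if err := deleteNodeLabel(f.ClientSet, nodeRegionLabel); err != nil {
	e2elog.Failf("failed to delete node label with error %v", err)
}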
212 e2e/pod.go (new file)
@@ -0,0 +1,212 @@
package e2e

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
			if strings.Contains(err.Error(), "not found") {
				return false, nil
			}
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		dNum := ds.Status.DesiredNumberScheduled
		ready := ds.Status.NumberReady
		e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
		if ready != dNum {
			return false, nil
		}

		return true, nil
	})
}

// waitForDeploymentComplete waits for the deployment to complete.
func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) error {
	var (
		deployment *appsv1.Deployment
		reason     string
		err        error
	)
	timeout := time.Duration(t) * time.Minute
	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
		deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		// TODO need to check rolling update

		// When the deployment status and its underlying resources reach the
		// desired state, we're done
		if deployment.Status.Replicas == deployment.Status.ReadyReplicas {
			return true, nil
		}
		e2elog.Logf("deployment status: expected replica count %d running replica count %d", deployment.Status.Replicas, deployment.Status.ReadyReplicas)
		reason = fmt.Sprintf("deployment status: %s", deployment.Status.String())
		return false, nil
	})

	if errors.Is(err, wait.ErrWaitTimeout) {
		err = fmt.Errorf("%s", reason)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %w", name, err)
	}
	return nil
}

func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (framework.ExecOptions, error) {
	cmd := []string{"/bin/sh", "-c", c}
	podList, err := f.PodClientNS(ns).List(context.TODO(), *opt)
	if err != nil {
		return framework.ExecOptions{}, err
	}
	if len(podList.Items) == 0 {
		return framework.ExecOptions{}, errors.New("podlist is empty")
	}
	return framework.ExecOptions{
		Command:            cmd,
		PodName:            podList.Items[0].Name,
		Namespace:          ns,
		ContainerName:      podList.Items[0].Spec.Containers[0].Name,
		Stdin:              nil,
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: true,
	}, nil
}

func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string, error) {
	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", "", err
	}
	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
	return stdOut, stdErr, err
}

func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string, error) {
	opt := &metav1.ListOptions{
		LabelSelector: rookTolBoxPodLabel,
	}
	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", "", err
	}
	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
	return stdOut, stdErr, err
}

func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) {
	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", err.Error()
	}
	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
	if err != nil {
		e2elog.Logf("command %s failed: %v", c, err)
	}
	return stdOut, stdErr
}

func loadApp(path string) (*v1.Pod, error) {
	app := v1.Pod{}
	err := unmarshal(path, &app)
	if err != nil {
		return nil, err
	}
	return &app, nil
}

func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
	_, err := c.CoreV1().Pods(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout)
}

func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v for pod %s to be in Running state", timeout, name)
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodRunning:
			return true, nil
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		}
		e2elog.Logf("%s app is in %s phase, expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
		return false, nil
	})
}

func deletePod(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	start := time.Now()
	e2elog.Logf("Waiting for pod %v to be deleted", name)
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})

		if apierrs.IsNotFound(err) {
			return true, nil
		}
		e2elog.Logf("waiting for %s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		if err != nil {
			return false, err
		}
		return false, nil
	})
}

func deletePodWithLabel(label, ns string, skipNotFound bool) error {
	_, err := framework.RunKubectl(cephCSINamespace, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
	if err != nil {
		e2elog.Logf("failed to delete pod %v", err)
	}
	return err
}
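The exec helpers return stdout, stderr, and the exec error separately, and callers treat non-empty stderr as its own failure. A hedged sketch of that convention, assuming the toolbox pod is labeled with rookTolBoxPodLabel as above:

// Sketch: run a ceph CLI command in the Rook toolbox pod and apply the
// stdout/stderr/error convention used throughout these helpers.
stdOut, stdErr, err := execCommandInToolBoxPod(f, "ceph status --format=json", rookNamespace)
if err != nil {
	e2elog.Failf("failed to exec in toolbox pod with error %v", err)
}
if stdErr != "" {
	e2elog.Failf("ceph status reported stderr %s", stdErr)
}
e2elog.Logf("ceph status: %s", stdOut)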
171 e2e/pvc.go (new file)
@@ -0,0 +1,171 @@
package e2e

import (
	"context"
	"errors"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	testutils "k8s.io/kubernetes/test/utils"
)

func loadPVC(path string) (*v1.PersistentVolumeClaim, error) {
	pvc := &v1.PersistentVolumeClaim{}
	err := unmarshal(path, &pvc)
	if err != nil {
		return nil, err
	}
	return pvc, err
}

func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
	timeout := time.Duration(t) * time.Minute
	pv := &v1.PersistentVolume{}
	var err error
	_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	if timeout == 0 {
		return nil
	}
	name := pvc.Name
	start := time.Now()
	e2elog.Logf("Waiting up to %v for PVC %s to be in Bound state", timeout, name)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			if apierrs.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}

		if pvc.Spec.VolumeName == "" {
			return false, nil
		}

		pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
		if apierrs.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		err = e2epv.WaitOnPVandPVC(c, pvc.Namespace, pv, pvc)
		if err != nil {
			return false, nil
		}
		return true, nil
	})
}

func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
	timeout := time.Duration(t) * time.Minute
	nameSpace := pvc.Namespace
	name := pvc.Name
	var err error
	e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)

	pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("delete of PVC %v failed: %w", name, err)
	}
	start := time.Now()
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PVC is really deleted.
		e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			return false, nil
		}
		if !apierrs.IsNotFound(err) {
			return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", name, err)
		}

		// Examine the PV; it should be deleted as well.
		_, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
		if err == nil {
			return false, nil
		}

		if !apierrs.IsNotFound(err) {
			return false, fmt.Errorf("get on deleted PV %v failed with error other than \"not found\": %w", pv.Name, err)
		}

		return true, nil
	})
}

// getBoundPV returns the PV bound to the given PVC.
func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	// Get new copy of the claim
	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// Get the bound PV
	pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
	return pv, err
}

func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	pv, err := getBoundPV(f.ClientSet, pvc)
	if err != nil {
		return err
	}

	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
		return errors.New("found empty NodeSelectorTerms in PV")
	}

	rFound := false
	zFound := false
	for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
		switch expression.Key {
		case nodeCSIRegionLabel:
			if rFound {
				return errors.New("found multiple occurrences of topology key for region")
			}
			rFound = true
			if expression.Values[0] != regionValue {
				return errors.New("topology value for region label mismatch")
			}
		case nodeCSIZoneLabel:
			if zFound {
				return errors.New("found multiple occurrences of topology key for zone")
			}
			zFound = true
			if expression.Values[0] != zoneValue {
				return errors.New("topology value for zone label mismatch")
			}
		default:
			return errors.New("unexpected key in node selector terms found in PV")
		}
	}
	return nil
}
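A sketch of the PVC lifecycle these helpers support; pvcPath and deployTimeout come from the surrounding suite:

// Sketch: load a PVC template, create it and wait for Bound, then delete
// it and verify the backing PV is also removed.
pvc, err := loadPVC(pvcPath)
if err != nil {
	e2elog.Failf("failed to load pvc with error %v", err)
}
pvc.Namespace = f.UniqueName
if err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout); err != nil {
	e2elog.Failf("failed to create pvc with error %v", err)
}
if err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout); err != nil {
	e2elog.Failf("failed to delete pvc with error %v", err)
}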
830 e2e/rbd.go (file diff suppressed because it is too large)
390 e2e/rbd_helper.go (new file)
@@ -0,0 +1,390 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"

	v1 "k8s.io/api/core/v1"
	scv1 "k8s.io/api/storage/v1"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func imageSpec(pool, image string) string {
	if radosNamespace != "" {
		return pool + "/" + radosNamespace + "/" + image
	}
	return pool + "/" + image
}

func rbdOptions(pool string) string {
	if radosNamespace != "" {
		return "--pool=" + pool + " --namespace " + radosNamespace
	}
	return "--pool=" + pool
}

func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOptions, parameters map[string]string) error {
	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml")
	sc, err := getStorageClass(scPath)
	if err != nil {
		return err
	}
	sc.Parameters["pool"] = defaultRBDPool
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = rbdProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName

	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")

	sc.Parameters["clusterID"] = fsID
	for k, v := range parameters {
		sc.Parameters[k] = v
	}
	sc.Namespace = cephCSINamespace

	if scOptions["volumeBindingMode"] == "WaitForFirstConsumer" {
		value := scv1.VolumeBindingWaitForFirstConsumer
		sc.VolumeBindingMode = &value
	}

	// comma separated mount options
	if opt, ok := scOptions[rbdmountOptions]; ok {
		mOpt := strings.Split(opt, ",")
		sc.MountOptions = append(sc.MountOptions, mOpt...)
	}
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func createRadosNamespace(f *framework.Framework) error {
	stdOut, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd namespace ls --pool=%s", defaultRBDPool), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error listing rbd namespace %v", stdErr)
	}
	if !strings.Contains(stdOut, radosNamespace) {
		_, stdErr, err = execCommandInToolBoxPod(f,
			fmt.Sprintf("rbd namespace create %s", rbdOptions(defaultRBDPool)), rookNamespace)
		if err != nil {
			return err
		}
		if stdErr != "" {
			return fmt.Errorf("error creating rbd namespace %v", stdErr)
		}
	}
	stdOut, stdErr, err = execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd namespace ls --pool=%s", rbdTopologyPool), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error listing rbd namespace %v", stdErr)
	}

	if !strings.Contains(stdOut, radosNamespace) {
		_, stdErr, err = execCommandInToolBoxPod(f,
			fmt.Sprintf("rbd namespace create %s", rbdOptions(rbdTopologyPool)), rookNamespace)
		if err != nil {
			return err
		}
		if stdErr != "" {
			return fmt.Errorf("error creating rbd namespace %v", stdErr)
		}
	}
	return nil
}

func createRBDSecret(c kubernetes.Interface, f *framework.Framework) error {
	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "secret.yaml")
	sc, err := getSecret(scPath)
	if err != nil {
		return err
	}
	adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting admin key %v", stdErr)
	}
	sc.StringData["userID"] = adminUser
	sc.StringData["userKey"] = adminKey
	sc.Namespace = cephCSINamespace
	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	err = updateSecretForEncryption(c)
	return err
}

type imageInfoFromPVC struct {
	imageID         string
	imageName       string
	csiVolumeHandle string
	pvName          string
}

// getImageInfoFromPVC reads the volume handle of the PV bound to the passed-in
// PVC, and returns imageInfoFromPVC or an error.
func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (imageInfoFromPVC, error) {
	var imageData imageInfoFromPVC

	c := f.ClientSet.CoreV1()
	pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
	if err != nil {
		return imageData, err
	}

	pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return imageData, err
	}

	imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
	imageID := imageIDRegex.FindString(pv.Spec.CSI.VolumeHandle)

	imageData = imageInfoFromPVC{
		imageID:         imageID,
		imageName:       fmt.Sprintf("csi-vol-%s", imageID),
		csiVolumeHandle: pv.Spec.CSI.VolumeHandle,
		pvName:          pv.Name,
	}
	return imageData, nil
}

func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string, error) {
	cmd := fmt.Sprintf("rbd image-meta get %s %s", rbdImageSpec, metaKey)
	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return strings.TrimSpace(stdOut), fmt.Errorf("%s", stdErr)
	}
	return strings.TrimSpace(stdOut), nil
}

func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framework.Framework) error {
	pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
	if err != nil {
		return err
	}
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}
	rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
	encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f)
	if err != nil {
		return err
	}
	if encryptedState != "encrypted" {
		return fmt.Errorf("%v not equal to encrypted", encryptedState)
	}

	volumeMountPath := app.Spec.Containers[0].VolumeMounts[0].MountPath
	mountType, err := getMountType(app.Name, app.Namespace, volumeMountPath, f)
	if err != nil {
		return err
	}
	if mountType != "crypt" {
		return fmt.Errorf("%v not equal to crypt", mountType)
	}

	if kms == "vault" {
		// check new passphrase created
		_, stdErr := readVaultSecret(imageData.csiVolumeHandle, f)
		if stdErr != "" {
			return fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
		}
	}

	err = deletePVCAndApp("", f, pvc, app)
	if err != nil {
		return err
	}

	if kms == "vault" {
		// check the passphrase was deleted
		stdOut, _ := readVaultSecret(imageData.csiVolumeHandle, f)
		if stdOut != "" {
			return fmt.Errorf("passphrase found in vault while it should be deleted: %s", stdOut)
		}
	}
	return nil
}

func listRBDImages(f *framework.Framework) ([]string, error) {
	var imgInfos []string

	stdout, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd ls --format=json %s", rbdOptions(defaultRBDPool)), rookNamespace)
	if err != nil {
		return imgInfos, err
	}
	if stdErr != "" {
		return imgInfos, fmt.Errorf("failed to list images %v", stdErr)
	}

	err = json.Unmarshal([]byte(stdout), &imgInfos)
	if err != nil {
		return imgInfos, err
	}
	return imgInfos, nil
}

func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
	return err
}

func deletePool(name string, cephfs bool, f *framework.Framework) error {
	cmds := []string{}
	if cephfs {
		// ceph fs fail
		// ceph fs rm myfs --yes-i-really-mean-it
		// ceph osd pool delete myfs-metadata myfs-metadata
		// --yes-i-really-really-mean-it
		// ceph osd pool delete myfs-data0 myfs-data0
		// --yes-i-really-really-mean-it
		cmds = append(cmds, fmt.Sprintf("ceph fs fail %s", name),
			fmt.Sprintf("ceph fs rm %s --yes-i-really-mean-it", name),
			fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata --yes-i-really-really-mean-it", name, name),
			fmt.Sprintf("ceph osd pool delete %s-data0 %s-data0 --yes-i-really-really-mean-it", name, name))
	} else {
		// ceph osd pool delete replicapool replicapool
		// --yes-i-really-really-mean-it
		cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s --yes-i-really-really-mean-it", name, name))
	}

	for _, cmd := range cmds {
		// discard stdErr as some commands print warnings in stdErr
		_, _, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
		if err != nil {
			return err
		}
	}
	return nil
}

func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return "", err
	}

	stdOut, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd info %s", imageSpec(pool, imageData.imageName)), rookNamespace)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return "", fmt.Errorf("failed to get rbd info %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
	} else {
		e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
	}

	return stdOut, nil
}

func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	_, err := getPVCImageInfoInPool(f, pvc, pool)

	return err
}

func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
	stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
	if err != nil {
		return err
	}

	if !strings.Contains(stdOut, "data_pool: "+dataPool) {
		return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
	}

	return nil
}

func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rados listomapkeys %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to listomapkeys %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found image journal %s in pool %s namespace %s", "csi.volume."+imageData.imageID, pool, radosNamespace)
	} else {
		e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
	}

	return nil
}

func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rados getomapval %s csi.volumes.default csi.volume.%s", rbdOptions(pool), imageData.pvName), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting CSI journal omap value %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found CSI journal entry %s in pool %s namespace %s", "csi.volume."+imageData.pvName, pool, radosNamespace)
	} else {
		e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
	}

	return nil
}
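A sketch combining the storage-class and pool-verification helpers above; the parameter values here are illustrative, not taken from the commit:

// Sketch: create an RBD storage class with late binding and an extra
// dataPool parameter, then, once a test PVC is bound, confirm its image
// reports that data pool. "replicapool-data" is a placeholder pool name.
scOptions := map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}
params := map[string]string{"dataPool": "replicapool-data"}
if err := createRBDStorageClass(f.ClientSet, f, scOptions, params); err != nil {
	e2elog.Failf("failed to create storageclass with error %v", err)
}
// pvc is a bound *v1.PersistentVolumeClaim created by the test.
if err := checkPVCDataPoolForImageInPool(f, pvc, defaultRBDPool, "replicapool-data"); err != nil {
	e2elog.Failf("data pool check failed with error %v", err)
}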
@@ -161,8 +161,10 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,

 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		e2elog.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
-		output, stdErr := execCommandInPod(f, cmd, ns, opt)
-
+		output, stdErr, err := execCommandInPod(f, cmd, ns, opt)
+		if err != nil {
+			return false, err
+		}
 		if stdErr != "" {
 			e2elog.Logf("failed to execute command in app pod %v", stdErr)
 			return false, nil
@@ -113,32 +113,46 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 	})
 }

-func createRBDSnapshotClass(f *framework.Framework) {
+func createRBDSnapshotClass(f *framework.Framework) error {
 	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "snapshotclass.yaml")
 	sc := getSnapshotClass(scPath)

 	sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace

-	fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr)
+	}
 	fsID = strings.Trim(fsID, "\n")
 	sc.Parameters["clusterID"] = fsID
 	sclient, err := newSnapshotClient()
-	Expect(err).Should(BeNil())
+	if err != nil {
+		return err
+	}
 	_, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
-	Expect(err).Should(BeNil())
+	return err
 }

-func createCephFSSnapshotClass(f *framework.Framework) {
+func createCephFSSnapshotClass(f *framework.Framework) error {
 	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
 	sc := getSnapshotClass(scPath)
 	sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
-	fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr)
+	}
 	fsID = strings.Trim(fsID, "\n")
 	sc.Parameters["clusterID"] = fsID
 	sclient, err := newSnapshotClient()
-	Expect(err).Should(BeNil())
+	if err != nil {
+		return err
+	}
 	_, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
-	Expect(err).Should(BeNil())
+	return err
 }
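The hunk above changes the snapshot-class helpers from Ginkgo assertions to plain error returns, so each caller decides how to fail. A minimal caller sketch (not from the diff) under that new signature:

// Sketch: the caller now handles the error instead of the helper
// asserting with Expect().
if err := createRBDSnapshotClass(f); err != nil {
	e2elog.Failf("failed to create snapshotclass with error %v", err)
}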
@@ -91,7 +91,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e

 	c := f.ClientSet

-	fsID, e := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	fsID, e, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get fsid from ceph cluster %s", e)
 	}
@@ -101,7 +104,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
 	// create rbd image
 	cmd := fmt.Sprintf("rbd create %s --size=%d --image-feature=layering %s", rbdImageName, 4096, rbdOptions(defaultRBDPool))

-	_, e = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	_, e, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create rbd image %s", e)
 	}
@@ -115,7 +121,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e

 	pv := getStaticPV(pvName, rbdImageName, size, "csi-rbd-secret", cephCSINamespace, sc, "rbd.csi.ceph.com", isBlock, opt)

-	_, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+	_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("PV Create API error: %w", err)
 	}
@@ -155,10 +161,11 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
 	}

 	cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool))
-	execCommandInToolBoxPod(f, cmd, rookNamespace)
-	return nil
+	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	return err
 }

+// nolint:gocyclo // reduce complexity
 func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	opt := make(map[string]string)
 	var (
@@ -180,7 +187,10 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
 		LabelSelector: "app=rook-ceph-tools",
 	}

-	fsID, e := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt)
+	fsID, e, err := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get fsid from ceph cluster %s", e)
 	}
@@ -193,21 +203,30 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
 	// create subvolumegroup, command will work even if group is already present.
 	cmd := fmt.Sprintf("ceph fs subvolumegroup create %s %s", fsName, groupName)

-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create subvolumegroup %s", e)
 	}

 	// create subvolume
 	cmd = fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fsName, cephFsVolName, groupName, size)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create subvolume %s", e)
 	}

 	// get rootpath
 	cmd = fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fsName, cephFsVolName, groupName)
-	rootPath, e := execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	rootPath, e, err := execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get rootpath %s", e)
 	}
@@ -215,17 +234,22 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
 	rootPath = strings.Trim(rootPath, "\n")

 	// create secret
-	userID := "admin" // nolint
-	secret := getSecret(scPath)
-	adminKey, e := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt)
+	secret, err := getSecret(scPath)
+	if err != nil {
+		return err
+	}
+	adminKey, e, err := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get adminKey %s", e)
 	}
-	secret.StringData["userID"] = userID
+	secret.StringData["userID"] = adminUser
 	secret.StringData["userKey"] = adminKey
 	secret.Name = secretName
 	secret.Namespace = cephCSINamespace
-	_, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{})
+	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to create secret, error %w", err)
 	}
@@ -280,14 +304,20 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro

 	// delete subvolume
 	cmd = fmt.Sprintf("ceph fs subvolume rm %s %s %s", fsName, cephFsVolName, groupName)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to remove sub-volume %s", e)
 	}

 	// delete subvolume group
 	cmd = fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fsName, groupName)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to remove subvolume group %s", e)
 	}
@ -22,6 +22,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
app *v1.Pod
|
||||
// cwd stores the initial working directory.
|
||||
cwd string
|
||||
err error
|
||||
)
|
||||
// deploy cephfs CSI
|
||||
BeforeEach(func() {
|
||||
@ -30,26 +31,34 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
}
|
||||
c = f.ClientSet
|
||||
if cephCSINamespace != defaultNs {
|
||||
err := createNamespace(c, cephCSINamespace)
|
||||
err = createNamespace(c, cephCSINamespace)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to create namespace with error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// fetch current working directory to switch back
|
||||
// when we are done upgrading.
|
||||
var err error
|
||||
cwd, err = os.Getwd()
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to getwd with error %v", err)
|
||||
}
|
||||
err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to upgrade csi with error %v", err)
|
||||
}
|
||||
err = createConfigMap(cephfsDirPath, f.ClientSet, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create configmap with error %v", err)
|
||||
}
|
||||
err = createCephfsSecret(f.ClientSet, f)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create secret with error %v", err)
|
||||
}
|
||||
err = createCephfsStorageClass(f.ClientSet, f, true, "")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create storageclass with error %v", err)
|
||||
}
|
||||
createConfigMap(cephfsDirPath, f.ClientSet, f)
|
||||
createCephfsSecret(f.ClientSet, f)
|
||||
createCephfsStorageClass(f.ClientSet, f, true, "")
|
||||
})
|
||||
AfterEach(func() {
|
||||
if !testCephFS || !upgradeTesting {
|
||||
@ -63,15 +72,26 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
// log node plugin
|
||||
logsCSIPods("app=csi-cephfsplugin", c)
|
||||
}
|
||||
deleteConfigMap(cephfsDirPath)
|
||||
deleteResource(cephfsExamplePath + "secret.yaml")
|
||||
deleteResource(cephfsExamplePath + "storageclass.yaml")
|
||||
err = deleteConfigMap(cephfsDirPath)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete configmap with error %v", err)
|
||||
}
|
||||
err = deleteResource(cephfsExamplePath + "secret.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete secret with error %v", err)
|
||||
}
|
||||
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete storageclass with error %v", err)
|
||||
}
|
||||
if deployCephFS {
|
||||
deleteCephfsPlugin()
|
||||
if cephCSINamespace != defaultNs {
|
||||
err := deleteNamespace(c, cephCSINamespace)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete namespace with error %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -83,13 +103,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
By("checking provisioner deployment is running")
|
||||
err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
|
||||
}
|
||||
|
||||
By("checking nodeplugin deamonsets is running")
|
||||
By("checking nodeplugin deamonset pods are running")
|
||||
err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
|
||||
}
|
||||
|
||||
By("upgrade to latest changes and verify app re-mount", func() {
|
||||
@ -100,32 +120,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
|
||||
pvc, err = loadPVC(pvcPath)
|
||||
if pvc == nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to load pvc with error %v", err)
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
e2elog.Logf("The PVC template %+v", pvc)
|
||||
|
||||
app, err = loadApp(appPath)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to load application with error %v", err)
|
||||
}
|
||||
app.Namespace = f.UniqueName
|
||||
app.Labels = map[string]string{"app": "upgrade-testing"}
|
||||
pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
|
||||
err = createPVCAndApp("", f, pvc, app, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to create pvc and application with error %v", err)
|
||||
}
|
||||
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to delete application with error %v", err)
|
||||
}
|
||||
deleteCephfsPlugin()
|
||||
|
||||
// switch back to current changes.
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to d chdir with error %v", err)
|
||||
}
|
||||
deployCephfsPlugin()
|
||||
|
||||
@ -134,7 +153,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
// an earlier release.
|
||||
err = createApp(f.ClientSet, app, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to create application with error %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -144,7 +163,6 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
v, err = f.ClientSet.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
e2elog.Logf("failed to get server version with error %v", err)
|
||||
Fail(err.Error())
|
||||
}
|
||||
// Resize 0.3.0 is only supported from v1.15+
|
||||
if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
|
||||
@ -153,23 +171,23 @@ var _ = Describe("CephFS Upgrade Testing", func() {
|
||||
}
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to get pvc with error %v", err)
}

// resize PVC
err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to expand pvc with error %v", err)
}
// wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
}
// validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to check directory size with error %v", err)
}
}
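`expandPVCSize` is a suite helper, but the mechanism behind it is plain client-go: raise the storage request on the live PVC and let the external-resizer reconcile status.capacity upward, after which the pod is expected back in Running state and `checkDirSize` verifies the mounted filesystem grew. A rough sketch of the request side of that mechanism (an illustration, not the helper's actual body):

package e2e

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// requestExpansion raises the requested size; the CSI resizer sidecar
// then drives volume and filesystem expansion asynchronously.
func requestExpansion(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string) error {
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
	_, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).
		Update(context.TODO(), pvc, metav1.UpdateOptions{})
	return err
}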

@@ -178,7 +196,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
By("delete pvc and app")
|
||||
err = deletePVCAndApp("", f, pvc, app)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to delete pvc and application with error %v", err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
@@ -32,28 +32,44 @@ var _ = Describe("RBD Upgrade Testing", func() {
if cephCSINamespace != defaultNs {
	err := createNamespace(c, cephCSINamespace)
	if err != nil {
-		Fail(err.Error())
+		e2elog.Failf("failed to create namespace with error %v", err)
	}
}
-createNodeLabel(f, nodeRegionLabel, regionValue)
-createNodeLabel(f, nodeZoneLabel, zoneValue)

// fetch current working directory to switch back
// when we are done upgrading.
var err error
cwd, err = os.Getwd()
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to do getwd with error %v", err)
}

deployVault(f.ClientSet, deployTimeout)
err = upgradeAndDeployCSI(upgradeVersion, "rbd")
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to upgrade and deploy CSI with error %v", err)
}
+err = createConfigMap(rbdDirPath, f.ClientSet, f)
+if err != nil {
+	e2elog.Failf("failed to create configmap with error %v", err)
+}
+err = createRBDStorageClass(f.ClientSet, f, nil, nil)
+if err != nil {
+	e2elog.Failf("failed to create storageclass with error %v", err)
+}
+err = createRBDSecret(f.ClientSet, f)
+if err != nil {
+	e2elog.Failf("failed to create secret with error %v", err)
+}
+
+err = createNodeLabel(f, nodeRegionLabel, regionValue)
+if err != nil {
+	e2elog.Failf("failed to create node label with error %v", err)
+}
+err = createNodeLabel(f, nodeZoneLabel, zoneValue)
+if err != nil {
+	e2elog.Failf("failed to create node label with error %v", err)
+}
-createConfigMap(rbdDirPath, f.ClientSet, f)
-createRBDStorageClass(f.ClientSet, f, nil, nil)
-createRBDSecret(f.ClientSet, f)
})
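The `createNodeLabel` calls, now checked for errors, apply the region and zone topology labels that the RBD tests use elsewhere. Conceptually the operation is a merge patch on the node object; a hedged client-go sketch of that idea (the suite helper's real implementation may differ):

package e2e

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// labelNode attaches key=value to a node via a JSON merge patch.
func labelNode(c kubernetes.Interface, nodeName, key, value string) error {
	patch := []byte(fmt.Sprintf(`{"metadata":{"labels":{%q:%q}}}`, key, value))
	_, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName,
		types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}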
AfterEach(func() {
	if !testRBD || !upgradeTesting {
@@ -68,21 +84,36 @@ var _ = Describe("RBD Upgrade Testing", func() {
	logsCSIPods("app=csi-rbdplugin", c)
}

-deleteConfigMap(rbdDirPath)
-deleteResource(rbdExamplePath + "secret.yaml")
-deleteResource(rbdExamplePath + "storageclass.yaml")
+err := deleteConfigMap(rbdDirPath)
+if err != nil {
+	e2elog.Failf("failed to delete configmap with error %v", err)
+}
+err = deleteResource(rbdExamplePath + "secret.yaml")
+if err != nil {
+	e2elog.Failf("failed to delete secret with error %v", err)
+}
+err = deleteResource(rbdExamplePath + "storageclass.yaml")
+if err != nil {
+	e2elog.Failf("failed to delete storageclass with error %v", err)
+}
deleteVault()
if deployRBD {
	deleteRBDPlugin()
	if cephCSINamespace != defaultNs {
-		err := deleteNamespace(c, cephCSINamespace)
+		err = deleteNamespace(c, cephCSINamespace)
		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to delete namespace with error %v", err)
		}
	}
}
-deleteNodeLabel(c, nodeRegionLabel)
-deleteNodeLabel(c, nodeZoneLabel)
+err = deleteNodeLabel(c, nodeRegionLabel)
+if err != nil {
+	e2elog.Failf("failed to delete node label with error %v", err)
+}
+err = deleteNodeLabel(c, nodeZoneLabel)
+if err != nil {
+	e2elog.Failf("failed to delete node label with error %v", err)
+}
})

Context("Test RBD CSI", func() {
|
||||
@ -93,14 +124,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
|
||||
By("checking provisioner deployment is running", func() {
|
||||
err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
|
||||
}
|
||||
})
|
||||
|
||||
By("checking nodeplugin deamonsets is running", func() {
|
||||
By("checking nodeplugin deamonset pods are running", func() {
|
||||
err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
|
||||
}
|
||||
})
|
||||
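`waitForDeploymentComplete` and `waitForDaemonSets` are polling helpers; the general shape behind both is a `wait.PollImmediate` loop over the object's status until it reports ready or the timeout expires. A hedged sketch of the deployment case (illustrative, not the helper's actual body):

package e2e

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// pollDeploymentReady returns once all desired replicas are available,
// or an error when the timeout elapses first.
func pollDeploymentReady(c kubernetes.Interface, name, ns string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, nil // treat errors as transient and keep polling
		}
		if d.Spec.Replicas == nil {
			return false, nil
		}
		return d.Status.AvailableReplicas == *d.Spec.Replicas, nil
	})
}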

@@ -110,31 +141,30 @@ var _ = Describe("RBD Upgrade Testing", func() {
var err error
pvc, err = loadPVC(pvcPath)
if pvc == nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to load pvc with error %v", err)
}
pvc.Namespace = f.UniqueName
e2elog.Logf("The PVC template %+v", pvc)

app, err = loadApp(appPath)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to load application with error %v", err)
}
app.Namespace = f.UniqueName
app.Labels = map[string]string{"app": "upgrade-testing"}
pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
err = createPVCAndApp("", f, pvc, app, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to create pvc with error %v", err)
}
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to delete application with error %v", err)
}
deleteRBDPlugin()

err = os.Chdir(cwd)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to change directory with error %v", err)
}

deployRBDPlugin()
@@ -143,7 +173,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
app.Labels = map[string]string{"app": "upgrade-testing"}
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to create application with error %v", err)
}
})

@@ -153,7 +183,6 @@ var _ = Describe("RBD Upgrade Testing", func() {
v, err := f.ClientSet.Discovery().ServerVersion()
if err != nil {
	e2elog.Logf("failed to get server version with error %v", err)
-	Fail(err.Error())
}
// Resize 0.3.0 is only supported from v1.15+
if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
@@ -162,23 +191,23 @@ var _ = Describe("RBD Upgrade Testing", func() {
}
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to get pvc with error %v", err)
}

// resize PVC
err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to expand pvc with error %v", err)
}
// wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
}
// validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize)
if err != nil {
-	Fail(err.Error())
+	e2elog.Failf("failed to check directory size with error %v", err)
}
}

@@ -187,7 +216,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
By("delete pvc and app", func() {
|
||||
err := deletePVCAndApp("", f, pvc, app)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
e2elog.Failf("failed to delete pvc and application with error %v", err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
1020
e2e/utils.go
File diff suppressed because it is too large