/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"crypto/md5" //nolint:gosec // hash generation
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	appsv1 "k8s.io/api/apps/v1"
	batch "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	scv1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

/* #nosec:G101, values not credentials, just a reference to the location. */
const (
	defaultNs     = "default"
	defaultSCName = ""

	rbdType    = "rbd"
	cephfsType = "cephfs"

	volumesType = "volumes"
	snapsType   = "snaps"

	rookToolBoxPodLabel = "app=rook-ceph-tools"
	rbdMountOptions     = "mountOptions"

	retainPolicy = v1.PersistentVolumeReclaimRetain
	// deletePolicy is the default policy in E2E.
	deletePolicy = v1.PersistentVolumeReclaimDelete
	// Default key and label for ListOptions.
	appKey        = "app"
	appLabel      = "write-data-in-pod"
	appCloneLabel = "app-clone"

	noError = ""
	// Labels/selector used to list/delete RBD pods.
	rbdPodLabels = "app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)"

	exitOneErr = "command terminated with exit code 1"

	// Cluster name, set by the user.
	clusterNameKey     = "csi.ceph.com/cluster/name"
	defaultClusterName = "k8s-cluster-1"
)

var (
	// cli flags.
	deployTimeout    int
	deployCephFS     bool
	deployRBD        bool
	deployNFS        bool
	testCephFS       bool
	testRBD          bool
	testNFS          bool
	helmTest         bool
	upgradeTesting   bool
	upgradeVersion   string
	cephCSINamespace string
	rookNamespace    string
	radosNamespace   string
	poll             = 2 * time.Second
	isOpenShift      bool
	clusterID        string
	nfsDriverName    string
)

type cephfsFilesystem struct {
	Name         string `json:"name"`
	MetadataPool string `json:"metadata_pool"`
}

// listCephFSFileSystems lists the CephFS filesystems by running
// `ceph fs ls` in JSON format inside the toolbox pod.
func listCephFSFileSystems(f *framework.Framework) ([]cephfsFilesystem, error) {
	var fsList []cephfsFilesystem

	stdout, stdErr, err := execCommandInToolBoxPod(
		f,
		"ceph fs ls --format=json",
		rookNamespace)
	if err != nil {
		return fsList, err
	}
	if stdErr != "" {
		return fsList, fmt.Errorf("error listing fs %v", stdErr)
	}

	err = json.Unmarshal([]byte(stdout), &fsList)
	if err != nil {
		return fsList, err
	}

	return fsList, nil
}

// getCephFSMetadataPoolName gets the CephFS metadata pool name for the
// given filesystem name.
func getCephFSMetadataPoolName(f *framework.Framework, filesystem string) (string, error) {
	fsList, err := listCephFSFileSystems(f)
	if err != nil {
		return "", fmt.Errorf("list CephFS filesystem failed: %w", err)
	}
	for _, v := range fsList {
		if v.Name != filesystem {
			continue
		}

		return v.MetadataPool, nil
	}

	return "", fmt.Errorf("metadata pool name not found for filesystem: %s", filesystem)
}
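
// A minimal usage sketch for the helper above; the filesystem name "myfs"
// is illustrative and not defined in this file:
//
//	pool, err := getCephFSMetadataPoolName(f, "myfs")
//	if err != nil {
//		e2elog.Failf("failed to get metadata pool: %v", err)
//	}
//	e2elog.Logf("metadata pool for myfs is %s", pool)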

// compareStdoutWithCount parses stdOut as an integer and compares it with
// the expected omap object count.
func compareStdoutWithCount(stdOut string, count int) error {
	stdOut = strings.TrimSuffix(stdOut, "\n")
	res, err := strconv.Atoi(stdOut)
	if err != nil {
		return fmt.Errorf("failed to convert string to int: %v", stdOut)
	}
	if res != count {
		return fmt.Errorf("expected omap object count %d, got %d", count, res)
	}

	return nil
}

// validateOmapCount validates the number of OMAP entries on the given pool.
// It works with the CephFS and RBD drivers; mode can be snapsType or
// volumesType.
func validateOmapCount(f *framework.Framework, count int, driver, pool, mode string) {
	type radosListCommand struct {
		volumeMode                           string
		driverType                           string
		radosLsCmd, radosLsCmdFilter         string
		radosLsKeysCmd, radosLsKeysCmdFilter string
	}

	radosListCommands := []radosListCommand{
		{
			volumeMode: volumesType,
			driverType: cephfsType,
			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.",
				pool),
			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi|wc -l",
				pool),
		},
		{
			volumeMode:           volumesType,
			driverType:           rbdType,
			radosLsCmd:           fmt.Sprintf("rados ls %s", rbdOptions(pool)),
			radosLsCmdFilter:     fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.", rbdOptions(pool)),
			radosLsKeysCmd:       fmt.Sprintf("rados listomapkeys csi.volumes.default %s", rbdOptions(pool)),
			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
		},
		{
			volumeMode: snapsType,
			driverType: cephfsType,
			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
				pool),
			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi|wc -l",
				pool),
		},
		{
			volumeMode:           snapsType,
			driverType:           rbdType,
			radosLsCmd:           fmt.Sprintf("rados ls %s", rbdOptions(pool)),
			radosLsCmdFilter:     fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", rbdOptions(pool)),
			radosLsKeysCmd:       fmt.Sprintf("rados listomapkeys csi.snaps.default %s", rbdOptions(pool)),
			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
		},
	}

	for _, cmds := range radosListCommands {
		if !strings.EqualFold(cmds.volumeMode, mode) || !strings.EqualFold(cmds.driverType, driver) {
			continue
		}
		filterCmds := []string{cmds.radosLsCmdFilter, cmds.radosLsKeysCmdFilter}
		filterLessCmds := []string{cmds.radosLsCmd, cmds.radosLsKeysCmd}
		for i, cmd := range filterCmds {
			stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
			if err != nil || stdErr != "" {
				// guard against a nil err when only stdErr is set
				if err == nil || !strings.Contains(err.Error(), exitOneErr) {
					e2elog.Failf("failed to execute rados command '%s': err=%v stdErr=%s", cmd, err, stdErr)
				}
			}
			err = compareStdoutWithCount(stdOut, count)
			if err == nil {
				continue
			}
			saveErr := err
			if strings.Contains(err.Error(), "expected omap object count") {
				stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace)
				if err == nil {
					e2elog.Logf("additional debug info: rados ls command output: %s, stdErr: %s", stdOut, stdErr)
				}
			}
			e2elog.Failf("%v", saveErr)
		}
	}
}
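
// A minimal usage sketch for validateOmapCount, assuming a test that has
// provisioned two RBD-backed PVCs in the pool named by defaultRBDPool
// (defined elsewhere in this suite):
//
//	validateOmapCount(f, 2, rbdType, defaultRBDPool, volumesType)
//
// and, after creating snapshots of those volumes:
//
//	validateOmapCount(f, 2, rbdType, defaultRBDPool, snapsType)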

// getMons lists the rook-ceph-mon Services in the given namespace and
// returns their cluster-internal endpoints.
func getMons(ns string, c kubernetes.Interface) ([]string, error) {
	opt := metav1.ListOptions{
		LabelSelector: "app=rook-ceph-mon",
	}
	services := make([]string, 0)

	var svcList *v1.ServiceList
	t := time.Duration(deployTimeout) * time.Minute
	err := wait.PollImmediate(poll, t, func() (bool, error) {
		var svcErr error
		svcList, svcErr = c.CoreV1().Services(ns).List(context.TODO(), opt)
		if svcErr != nil {
			if isRetryableAPIError(svcErr) {
				return false, nil
			}

			return false, fmt.Errorf("failed to list Services in namespace %q: %w", ns, svcErr)
		}

		return true, nil
	})
	if err != nil {
		return services, fmt.Errorf("could not get Services: %w", err)
	}
	for i := range svcList.Items {
		s := fmt.Sprintf(
			"%s.%s.svc.cluster.local:%d",
			svcList.Items[i].Name,
			svcList.Items[i].Namespace,
			svcList.Items[i].Spec.Ports[0].Port)
		services = append(services, s)
	}

	return services, nil
}
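
// A minimal usage sketch; each returned entry has the form
// "<service>.<namespace>.svc.cluster.local:<port>":
//
//	mons, err := getMons(rookNamespace, f.ClientSet)
//	if err != nil {
//		e2elog.Failf("failed to list mons: %v", err)
//	}
//	e2elog.Logf("mon endpoints: %s", strings.Join(mons, ","))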

// getMonsHash returns the MD5 hash of the given mon endpoints string.
func getMonsHash(mons string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(mons))) //nolint:gosec // hash generation
}

// getClusterID returns the clusterID set via the CLI flag, falling back to
// the Ceph fsid fetched through the toolbox pod.
func getClusterID(f *framework.Framework) (string, error) {
	if clusterID != "" {
		return clusterID, nil
	}

	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return "", fmt.Errorf("failed getting clusterID through toolbox: %w", err)
	}
	if stdErr != "" {
		return "", fmt.Errorf("error getting fsid: %s", stdErr)
	}

	// remove the trailing newline present in fsID
	return strings.Trim(fsID, "\n"), nil
}

// getStorageClass loads a StorageClass from the YAML manifest at path.
func getStorageClass(path string) (scv1.StorageClass, error) {
	sc := scv1.StorageClass{}
	err := unmarshal(path, &sc)

	return sc, err
}

// getSecret loads a Secret from the YAML manifest at path, ignoring base64
// corrupt-input errors caused by placeholder values.
func getSecret(path string) (v1.Secret, error) {
	sc := v1.Secret{}
	err := unmarshal(path, &sc)
	// discard corruptInputError
	if err != nil {
		var b64cie base64.CorruptInputError
		if !errors.As(err, &b64cie) {
			return sc, err
		}
	}

	return sc, nil
}

// deleteResource deletes the resources defined in the template at scPath,
// after substituting the configured namespaces into the template.
func deleteResource(scPath string) error {
	data, err := replaceNamespaceInTemplate(scPath)
	if err != nil {
		e2elog.Logf("failed to read content from %s %v", scPath, err)
	}
	err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true")
	if err != nil {
		e2elog.Logf("failed to delete %s %v", scPath, err)
	}

	return err
}

// unmarshal reads the YAML manifest at fileName, converts it to JSON and
// unmarshals it into obj.
func unmarshal(fileName string, obj interface{}) error {
	f, err := os.ReadFile(fileName)
	if err != nil {
		return err
	}
	data, err := utilyaml.ToJSON(f)
	if err != nil {
		return err
	}

	err = json.Unmarshal(data, obj)

	return err
}
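
// A minimal usage sketch; the manifest path is illustrative:
//
//	pvc := &v1.PersistentVolumeClaim{}
//	if err := unmarshal("/path/to/pvc.yaml", pvc); err != nil {
//		e2elog.Failf("failed to unmarshal PVC manifest: %v", err)
//	}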

// createPVCAndApp creates a PVC and a pod;
// if name is not empty, it is set as both the PVC and app name.
func createPVCAndApp(
	name string,
	f *framework.Framework,
	pvc *v1.PersistentVolumeClaim,
	app *v1.Pod,
	pvcTimeout int,
) error {
	if name != "" {
		pvc.Name = name
		app.Name = name
		app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
	}
	err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
	if err != nil {
		return err
	}
	err = createApp(f.ClientSet, app, deployTimeout)

	return err
}
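
// A minimal usage sketch; the name "test-pvc-app" is illustrative and is
// applied to both the PVC and the pod:
//
//	if err := createPVCAndApp("test-pvc-app", f, pvc, app, deployTimeout); err != nil {
//		e2elog.Failf("failed to create PVC and app: %v", err)
//	}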

// createPVCAndDeploymentApp creates a PVC and a Deployment.
func createPVCAndDeploymentApp(
	f *framework.Framework,
	pvc *v1.PersistentVolumeClaim,
	app *appsv1.Deployment,
	pvcTimeout int,
) error {
	err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
	if err != nil {
		return err
	}
	err = createDeploymentApp(f.ClientSet, app, deployTimeout)

	return err
}

// validatePVCAndDeploymentAppBinding creates a PVC and a Deployment, and
// waits until all of the Deployment's replicas are Running. Use `replicas`
// to override the default number of replicas defined in the
// `deploymentPath` Deployment manifest.
func validatePVCAndDeploymentAppBinding(
	f *framework.Framework,
	pvcPath string,
	deploymentPath string,
	namespace string,
	replicas *int32,
	pvcTimeout int,
) (*v1.PersistentVolumeClaim, *appsv1.Deployment, error) {
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to load PVC: %w", err)
	}
	pvc.Namespace = namespace

	depl, err := loadAppDeployment(deploymentPath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to load Deployment: %w", err)
	}
	// use the same namespace as the PVC so that the pods can mount it
	depl.Namespace = namespace
	if replicas != nil {
		depl.Spec.Replicas = replicas
	}

	err = createPVCAndDeploymentApp(f, pvc, depl, pvcTimeout)
	if err != nil {
		return nil, nil, err
	}

	err = waitForDeploymentComplete(f.ClientSet, depl.Name, depl.Namespace, deployTimeout)
	if err != nil {
		return nil, nil, err
	}

	return pvc, depl, nil
}
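
// A minimal usage sketch; the manifest paths and the replica override of 3
// are illustrative:
//
//	replicas := int32(3)
//	pvc, depl, err := validatePVCAndDeploymentAppBinding(
//		f, pvcPath, deploymentPath, f.UniqueName, &replicas, deployTimeout)
//	if err != nil {
//		e2elog.Failf("failed to validate PVC and Deployment binding: %v", err)
//	}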

// deletePVCAndDeploymentApp deletes the PVC and the Deployment.
func deletePVCAndDeploymentApp(
	f *framework.Framework,
	pvc *v1.PersistentVolumeClaim,
	app *appsv1.Deployment,
) error {
	err := deleteDeploymentApp(f.ClientSet, app.Name, app.Namespace, deployTimeout)
	if err != nil {
		return err
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)

	return err
}

// deletePVCAndApp deletes the PVC and the pod;
// if name is not empty, it is set as both the PVC and app name.
func deletePVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error {
	if name != "" {
		pvc.Name = name
		app.Name = name
		app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
	}

	err := deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
	if err != nil {
		return err
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)

	return err
}

// createPVCAndAppBinding loads a PVC and a pod from the given manifests and
// creates both in the framework's unique namespace.
func createPVCAndAppBinding(
	pvcPath, appPath string,
	f *framework.Framework,
	pvcTimeout int,
) (*v1.PersistentVolumeClaim, *v1.Pod, error) {
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return nil, nil, err
	}
	pvc.Namespace = f.UniqueName

	app, err := loadApp(appPath)
	if err != nil {
		return nil, nil, err
	}
	app.Namespace = f.UniqueName

	err = createPVCAndApp("", f, pvc, app, pvcTimeout)
	if err != nil {
		return nil, nil, err
	}

	return pvc, app, nil
}

// validatePVCAndAppBinding creates a PVC and an app, then deletes both to
// validate the full provisioning round trip.
func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) error {
	pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
	if err != nil {
		return err
	}
	err = deletePVCAndApp("", f, pvc, app)

	return err
}

// getMountType returns the lsblk TYPE of the device backing mountPath
// inside the csi-rbdplugin container of the pods matching selector.
func getMountType(selector, mountPath string, f *framework.Framework) (string, error) {
	opt := metav1.ListOptions{
		LabelSelector: selector,
	}
	cmd := fmt.Sprintf("lsblk -o TYPE,MOUNTPOINT | grep '%s' | awk '{print $1}'", mountPath)
	stdOut, stdErr, err := execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return strings.TrimSpace(stdOut), fmt.Errorf("%s", stdErr)
	}

	return strings.TrimSpace(stdOut), nil
}

// validateNormalUserPVCAccess creates a PVC and mounts it into a pod that
// runs as a non-root user, then verifies the user can write to the volume.
func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return err
	}
	pvc.Namespace = f.UniqueName
	pvc.Name = f.UniqueName
	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		return err
	}
	var user int64 = 2000
	app := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod-run-as-non-root",
			Namespace: f.UniqueName,
			Labels: map[string]string{
				"app": "pod-run-as-non-root",
			},
		},
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{FSGroup: &user},
			Containers: []v1.Container{
				{
					Name:    "write-pod",
					Image:   "quay.io/centos/centos:latest",
					Command: []string{"/bin/sleep", "999999"},
					SecurityContext: &v1.SecurityContext{
						RunAsUser: &user,
					},
					VolumeMounts: []v1.VolumeMount{
						{
							MountPath: "/target",
							Name:      "target",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "target",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.Name,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}

	err = createApp(f.ClientSet, app, deployTimeout)
	if err != nil {
		return err
	}

	opt := metav1.ListOptions{
		LabelSelector: "app=pod-run-as-non-root",
	}
	_, stdErr, err := execCommandInPod(f, "echo testing > /target/testing", app.Namespace, &opt)
	if err != nil {
		return fmt.Errorf("failed to exec command in pod: %w", err)
	}
	if stdErr != "" {
		return fmt.Errorf("failed to touch a file as non-root user %v", stdErr)
	}

	// metrics for BlockMode were added in Kubernetes 1.22
	isBlockMode := false
	if pvc.Spec.VolumeMode != nil {
		isBlockMode = (*pvc.Spec.VolumeMode == v1.PersistentVolumeBlock)
	}
	if !isBlockMode && !isOpenShift {
		err = getMetricsForPVC(f, pvc, deployTimeout)
		if err != nil {
			return err
		}
	}

	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
	if err != nil {
		return err
	}

	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)

	return err
}

// writeDataInPod fills a file in the provided pod's volume with zeros.
func writeDataInPod(app *v1.Pod, opt *metav1.ListOptions, f *framework.Framework) error {
	app.Namespace = f.UniqueName

	err := createApp(f.ClientSet, app, deployTimeout)
	if err != nil {
		return err
	}

	// write data to the PVC. The idea here is to fill some content in the
	// file instead of filling and re-verifying the md5sum/data integrity
	filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
	// writing more data was causing E2E timeouts, so keep the size small for now
	_, writeErr, err := execCommandInPod(
		f,
		fmt.Sprintf("dd if=/dev/zero of=%s bs=1M count=10 status=none", filePath),
		app.Namespace,
		opt)
	if err != nil {
		return err
	}
	if writeErr != "" {
		err = fmt.Errorf("failed to write data %v", writeErr)
	}

	return err
}

// checkDataPersist writes data through an app, recreates the app and
// verifies that the data survived the pod restart.
func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
	data := "checking data persist"
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return err
	}

	pvc.Namespace = f.UniqueName

	app, err := loadApp(appPath)
	if err != nil {
		return err
	}
	app.Labels = map[string]string{"app": "validate-data"}
	app.Namespace = f.UniqueName

	err = createPVCAndApp("", f, pvc, app, deployTimeout)
	if err != nil {
		return err
	}

	opt := metav1.ListOptions{
		LabelSelector: "app=validate-data",
	}
	// write data to the PVC
	filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"

	_, stdErr, err := execCommandInPod(f, fmt.Sprintf("echo %s > %s", data, filePath), app.Namespace, &opt)
	if err != nil {
		return fmt.Errorf("failed to exec command in pod: %w", err)
	}
	if stdErr != "" {
		return fmt.Errorf("failed to write data to a file %v", stdErr)
	}
	// delete the app
	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
	if err != nil {
		return err
	}
	// recreate the app and check that the data persisted
	err = createApp(f.ClientSet, app, deployTimeout)
	if err != nil {
		return err
	}
	persistData, stdErr, err := execCommandInPod(f, fmt.Sprintf("cat %s", filePath), app.Namespace, &opt)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to get file content %v", stdErr)
	}
	if !strings.Contains(persistData, data) {
		return fmt.Errorf("data not persistent: expected data %s, received data %s", data, persistData)
	}

	err = deletePVCAndApp("", f, pvc, app)

	return err
}

// pvcDeleteWhenPoolNotFound validates that a PVC can still be deleted after
// its backing volume and pool have been removed out-of-band.
func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return err
	}
	pvc.Namespace = f.UniqueName

	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		return err
	}
	if cephFS {
		err = deleteBackingCephFSVolume(f, pvc)
		if err != nil {
			return err
		}
		// delete the CephFS filesystem
		err = deletePool(fileSystemName, cephFS, f)
		if err != nil {
			return err
		}
	} else {
		err = deleteBackingRBDImage(f, pvc)
		if err != nil {
			return err
		}
		// delete the rbd pool
		err = deletePool(defaultRBDPool, cephFS, f)
		if err != nil {
			return err
		}
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)

	return err
}

// checkMountOptions verifies that all the given mountFlags show up in the
// mount output for the app's volume.
func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFlags []string) error {
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return err
	}

	pvc.Namespace = f.UniqueName

	app, err := loadApp(appPath)
	if err != nil {
		return err
	}
	app.Labels = map[string]string{"app": "validate-mount-opt"}
	app.Namespace = f.UniqueName

	err = createPVCAndApp("", f, pvc, app, deployTimeout)
	if err != nil {
		return err
	}

	opt := metav1.ListOptions{
		LabelSelector: "app=validate-mount-opt",
	}

	cmd := fmt.Sprintf("mount |grep %s", app.Spec.Containers[0].VolumeMounts[0].MountPath)
	data, stdErr, err := execCommandInPod(f, cmd, app.Namespace, &opt)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to get mount point %v", stdErr)
	}
	for _, flag := range mountFlags {
		if !strings.Contains(data, flag) {
			return fmt.Errorf("mount option %s not found in %s", flag, data)
		}
	}

	err = deletePVCAndApp("", f, pvc, app)

	return err
}

// addTopologyDomainsToDSYaml uncomments the domainlabels argument in the
// DaemonSet template and sets it to the given labels.
func addTopologyDomainsToDSYaml(template, labels string) string {
	return strings.ReplaceAll(template, "# - \"--domainlabels=failure-domain/region,failure-domain/zone\"",
		"- \"--domainlabels="+labels+"\"")
}

// oneReplicaDeployYaml sets the replica count in a Deployment template to 1.
func oneReplicaDeployYaml(template string) string {
	re := regexp.MustCompile(`(\s+replicas:) \d+`)

	return re.ReplaceAllString(template, `$1 1`)
}
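
// A before/after sketch of the replica rewrite performed above; the capture
// group preserves the original indentation:
//
//	in := "spec:\n  replicas: 3"
//	out := oneReplicaDeployYaml(in) // "spec:\n  replicas: 1"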

// enableTopologyInTemplate flips the Topology feature gate to true in the
// given template.
func enableTopologyInTemplate(data string) string {
	return strings.ReplaceAll(data, "--feature-gates=Topology=false", "--feature-gates=Topology=true")
}

// writeDataAndCalChecksum writes data into the app's volume, calculates the
// SHA512 checksum of the written file and deletes the pod.
func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.Framework) (string, error) {
	filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
	// write data in the PVC
	err := writeDataInPod(app, opt, f)
	if err != nil {
		e2elog.Logf("failed to write data in the pod: %v", err)

		return "", err
	}

	checkSum, err := calculateSHA512sum(f, app, filePath, opt)
	if err != nil {
		e2elog.Logf("failed to calculate checksum: %v", err)

		return checkSum, err
	}

	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete pod: %v", err)
	}

	return checkSum, nil
}
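
// A minimal usage sketch; opt is expected to select the pod that mounts the
// PVC under test:
//
//	checkSum, err := writeDataAndCalChecksum(app, &opt, f)
//	if err != nil {
//		e2elog.Failf("failed to write data and calculate checksum: %v", err)
//	}
//	e2elog.Logf("checksum of the written data: %s", checkSum)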

// validatePVCClone creates totalCount clones of a source PVC (and apps for
// them) in parallel, verifies data integrity and KMS passphrases where
// applicable, and checks the backend RBD image counts during cleanup.
// nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
func validatePVCClone(
	totalCount int,
	sourcePvcPath, sourceAppPath, clonePvcPath, clonePvcAppPath,
	restoreSCName,
	dataPool string,
	kms kmsConfig,
	validatePVC validateFunc,
	f *framework.Framework,
) {
	var wg sync.WaitGroup
	wgErrs := make([]error, totalCount)
	chErrs := make([]error, totalCount)
	pvc, err := loadPVC(sourcePvcPath)
	if err != nil {
		e2elog.Failf("failed to load PVC: %v", err)
	}

	label := make(map[string]string)
	pvc.Namespace = f.UniqueName
	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create PVC: %v", err)
	}
	app, err := loadApp(sourceAppPath)
	if err != nil {
		e2elog.Failf("failed to load app: %v", err)
	}
	label[appKey] = appLabel
	app.Namespace = f.UniqueName
	app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
	app.Labels = label
	opt := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
	}

	checkSum := ""
	pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
	if err != nil {
		e2elog.Failf("failed to get pvc %v", err)
	}
	if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
		checkSum, err = writeDataAndCalChecksum(app, &opt, f)
		if err != nil {
			e2elog.Failf("failed to calculate checksum: %v", err)
		}
	}
	// validate created backend rbd images
	validateRBDImageCount(f, 1, defaultRBDPool)
	pvcClone, err := loadPVC(clonePvcPath)
	if err != nil {
		e2elog.Failf("failed to load PVC: %v", err)
	}
	pvcClone.Spec.DataSource.Name = pvc.Name
	pvcClone.Namespace = f.UniqueName
	if restoreSCName != "" {
		pvcClone.Spec.StorageClassName = &restoreSCName
	}

	appClone, err := loadApp(clonePvcAppPath)
	if err != nil {
		e2elog.Failf("failed to load application: %v", err)
	}
	appClone.Namespace = f.UniqueName
	wg.Add(totalCount)
	// create the clones and bind them to apps
	for i := 0; i < totalCount; i++ {
		go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
			name := fmt.Sprintf("%s%d", f.UniqueName, n)
			label := make(map[string]string)
			label[appKey] = name
			a.Labels = label
			opt := metav1.ListOptions{
				LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
			}
			wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
			if wgErrs[n] == nil && dataPool != noDataPool {
				wgErrs[n] = checkPVCDataPoolForImageInPool(f, &p, defaultRBDPool, dataPool)
			}
			if wgErrs[n] == nil && kms != noKMS {
				if kms.canGetPassphrase() {
					imageData, sErr := getImageInfoFromPVC(p.Namespace, name, f)
					if sErr != nil {
						wgErrs[n] = fmt.Errorf(
							"failed to get image info for %s namespace=%s volumehandle=%s error=%w",
							name,
							p.Namespace,
							imageData.csiVolumeHandle,
							sErr)
					} else {
						// check that a new passphrase was created
						stdOut, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle)
						if stdOut != "" {
							e2elog.Logf("successfully read the passphrase from vault: %s", stdOut)
						}
						if stdErr != "" {
							wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
						}
					}
				}
			}
			if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem && wgErrs[n] == nil {
				filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
				var checkSumClone string
				e2elog.Logf("calculating checksum of clone for filepath %s", filePath)
				checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
				e2elog.Logf("checksum for clone is %s", checkSumClone)
				if chErrs[n] != nil {
					e2elog.Logf("failed to calculate checksum of clone: %s", chErrs[n])
				}
				if checkSumClone != checkSum {
					e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
				}
			}
			if wgErrs[n] == nil && validatePVC != nil && kms != noKMS {
				wgErrs[n] = validatePVC(f, &p, &a)
			}
			wg.Done()
		}(i, *pvcClone, *appClone)
	}
	wg.Wait()

	failed := 0
	for i, err := range wgErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("failed to create PVC (%s%d): %v", f.UniqueName, i, err)
			failed++
		}
	}
	if failed != 0 {
		e2elog.Failf("creating PVCs failed, %d errors were logged", failed)
	}

	for i, err := range chErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
			failed++
		}
	}
	if failed != 0 {
		e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
	}

	// total images in the cluster: 1 parent rbd image + total temporary
	// clones + total clones
	totalCloneCount := totalCount + totalCount + 1
	validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
	// delete the parent pvc
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete PVC: %v", err)
	}

	totalCloneCount = totalCount + totalCount
	validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
	wg.Add(totalCount)
	// delete the clones and apps
	for i := 0; i < totalCount; i++ {
		go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
			name := fmt.Sprintf("%s%d", f.UniqueName, n)
			p.Spec.DataSource.Name = name
			var imageData imageInfoFromPVC
			var sErr error
			if kms != noKMS {
				if kms.canGetPassphrase() {
					imageData, sErr = getImageInfoFromPVC(p.Namespace, name, f)
					if sErr != nil {
						wgErrs[n] = fmt.Errorf(
							"failed to get image info for %s namespace=%s volumehandle=%s error=%w",
							name,
							p.Namespace,
							imageData.csiVolumeHandle,
							sErr)
					}
				}
			}
			if wgErrs[n] == nil {
				wgErrs[n] = deletePVCAndApp(name, f, &p, &a)
				if wgErrs[n] == nil && kms != noKMS {
					if kms.canGetPassphrase() {
						// check that the passphrase was deleted
						stdOut, _ := kms.getPassphrase(f, imageData.csiVolumeHandle)
						if stdOut != "" {
							wgErrs[n] = fmt.Errorf("passphrase found in vault while it should be deleted: %s", stdOut)
						}
					}
					if wgErrs[n] == nil && kms.canVerifyKeyDestroyed() {
						destroyed, msg := kms.verifyKeyDestroyed(f, imageData.csiVolumeHandle)
						if !destroyed {
							wgErrs[n] = fmt.Errorf("passphrase was not destroyed: %s", msg)
						}
					}
				}
			}
			wg.Done()
		}(i, *pvcClone, *appClone)
	}
	wg.Wait()

	for i, err := range wgErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
			failed++
		}
	}
	if failed != 0 {
		e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
	}

	validateRBDImageCount(f, 0, defaultRBDPool)
}
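
// A minimal usage sketch for validatePVCClone; the manifest path variables
// are illustrative, noDataPool and noKMS are identifiers referenced
// elsewhere in this file, and nil skips the optional PVC validation
// callback:
//
//	validatePVCClone(3, pvcPath, appPath, pvcClonePath, appClonePath,
//		"", noDataPool, noKMS, nil, f)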
|
2020-10-14 07:35:58 +00:00
|
|
|
|
2021-07-12 10:59:25 +00:00
|
|
|
// nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
|
2021-06-25 13:02:06 +00:00
|
|
|
func validatePVCSnapshot(
|
|
|
|
totalCount int,
|
2021-07-09 11:49:23 +00:00
|
|
|
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string,
|
2021-10-27 08:22:50 +00:00
|
|
|
kms, restoreKMS kmsConfig, restoreSCName,
|
2022-06-01 10:17:19 +00:00
|
|
|
dataPool string, f *framework.Framework,
|
|
|
|
) {
|
2021-04-08 15:46:11 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
wgErrs := make([]error, totalCount)
|
|
|
|
chErrs := make([]error, totalCount)
|
|
|
|
err := createRBDSnapshotClass(f)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to create storageclass: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
err = deleteRBDSnapshotClass()
|
|
|
|
if err != nil {
|
|
|
|
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
pvc, err := loadPVC(pvcPath)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to load PVC: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
label := make(map[string]string)
|
|
|
|
pvc.Namespace = f.UniqueName
|
|
|
|
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to create PVC: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
app, err := loadApp(appPath)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to load app: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
// write data in PVC
|
|
|
|
label[appKey] = appLabel
|
|
|
|
app.Namespace = f.UniqueName
|
|
|
|
app.Labels = label
|
|
|
|
opt := metav1.ListOptions{
|
|
|
|
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
|
|
|
|
}
|
|
|
|
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
|
|
|
|
checkSum, err := writeDataAndCalChecksum(app, &opt, f)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to calculate checksum: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, 1, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
snap := getSnapshot(snapshotPath)
|
|
|
|
snap.Namespace = f.UniqueName
|
|
|
|
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
|
2021-06-28 09:53:33 +00:00
|
|
|
|
|
|
|
wg.Add(totalCount)
|
2021-04-08 15:46:11 +00:00
|
|
|
// create snapshot
|
|
|
|
for i := 0; i < totalCount; i++ {
|
2021-06-28 09:53:33 +00:00
|
|
|
go func(n int, s snapapi.VolumeSnapshot) {
|
2021-04-08 15:46:11 +00:00
|
|
|
s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
|
|
|
|
wgErrs[n] = createSnapshot(&s, deployTimeout)
|
2021-07-09 11:49:23 +00:00
|
|
|
if wgErrs[n] == nil && kms != noKMS {
|
|
|
|
if kms.canGetPassphrase() {
|
2021-04-26 13:15:18 +00:00
|
|
|
content, sErr := getVolumeSnapshotContent(s.Namespace, s.Name)
|
|
|
|
if sErr != nil {
|
2021-06-25 13:02:06 +00:00
|
|
|
wgErrs[n] = fmt.Errorf(
|
2021-11-22 06:48:18 +00:00
|
|
|
"failed to get snapshotcontent for %s in namespace %s: %w",
|
2021-06-25 13:02:06 +00:00
|
|
|
s.Name,
|
|
|
|
s.Namespace,
|
|
|
|
sErr)
|
2021-04-26 13:15:18 +00:00
|
|
|
} else {
|
|
|
|
// check new passphrase created
|
2021-07-09 11:49:23 +00:00
|
|
|
_, stdErr := kms.getPassphrase(f, *content.Status.SnapshotHandle)
|
2021-04-26 13:15:18 +00:00
|
|
|
if stdErr != "" {
|
|
|
|
wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-06-28 09:53:33 +00:00
|
|
|
wg.Done()
|
|
|
|
}(i, snap)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
failed := 0
|
|
|
|
for i, err := range wgErrs {
|
|
|
|
if err != nil {
|
|
|
|
// not using Failf() as it aborts the test and does not log other errors
|
|
|
|
e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
|
|
|
|
failed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed != 0 {
|
|
|
|
e2elog.Failf("creating snapshots failed, %d errors were logged", failed)
|
|
|
|
}
|
|
|
|
|
|
|
|
// total images in cluster is 1 parent rbd image+ total snaps
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, totalCount+1, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
pvcClone, err := loadPVC(pvcClonePath)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to load PVC: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
appClone, err := loadApp(appClonePath)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to load application: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
pvcClone.Namespace = f.UniqueName
|
|
|
|
appClone.Namespace = f.UniqueName
|
|
|
|
pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0)
|
2021-09-29 06:46:55 +00:00
|
|
|
if restoreSCName != "" {
|
|
|
|
pvcClone.Spec.StorageClassName = &restoreSCName
|
|
|
|
}
|
2021-04-08 15:46:11 +00:00
|
|
|
|
|
|
|
// create multiple PVC from same snapshot
|
|
|
|
wg.Add(totalCount)
|
|
|
|
for i := 0; i < totalCount; i++ {
|
2021-06-28 09:53:33 +00:00
|
|
|
go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
|
2021-04-08 15:46:11 +00:00
|
|
|
name := fmt.Sprintf("%s%d", f.UniqueName, n)
|
|
|
|
label := make(map[string]string)
|
|
|
|
label[appKey] = name
|
|
|
|
a.Labels = label
|
|
|
|
opt := metav1.ListOptions{
|
|
|
|
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
|
|
|
|
}
|
|
|
|
wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
|
2021-09-29 06:46:55 +00:00
|
|
|
if wgErrs[n] == nil && restoreKMS != noKMS {
|
|
|
|
if restoreKMS.canGetPassphrase() {
|
|
|
|
imageData, sErr := getImageInfoFromPVC(p.Namespace, name, f)
|
|
|
|
if sErr != nil {
|
|
|
|
wgErrs[n] = fmt.Errorf(
|
|
|
|
"failed to get image info for %s namespace=%s volumehandle=%s error=%w",
|
|
|
|
name,
|
|
|
|
p.Namespace,
|
|
|
|
imageData.csiVolumeHandle,
|
|
|
|
sErr)
|
|
|
|
} else {
|
|
|
|
// check new passphrase created
|
|
|
|
_, stdErr := restoreKMS.getPassphrase(f, imageData.csiVolumeHandle)
|
|
|
|
if stdErr != "" {
|
|
|
|
wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wgErrs[n] = isEncryptedPVC(f, &p, &a)
|
|
|
|
}
|
2021-04-08 15:46:11 +00:00
|
|
|
if wgErrs[n] == nil {
|
|
|
|
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
|
2021-05-11 12:43:39 +00:00
|
|
|
var checkSumClone string
|
2021-04-08 15:46:11 +00:00
|
|
|
e2elog.Logf("calculating checksum clone for filepath %s", filePath)
|
|
|
|
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
|
|
|
|
e2elog.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name)
|
|
|
|
if chErrs[n] != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Logf("failed to calculte checksum for clone: %s", chErrs[n])
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
if checkSumClone != checkSum {
|
2021-06-25 13:02:06 +00:00
|
|
|
e2elog.Logf(
|
|
|
|
"checksum value didn't match. checksum=%s and checksumclone=%s",
|
|
|
|
checkSum,
|
|
|
|
checkSumClone)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
}
|
2021-06-28 09:53:33 +00:00
|
|
|
wg.Done()
|
|
|
|
}(i, *pvcClone, *appClone)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
for i, err := range wgErrs {
|
|
|
|
if err != nil {
|
|
|
|
// not using Failf() as it aborts the test and does not log other errors
|
|
|
|
e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
|
|
|
|
failed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed != 0 {
|
|
|
|
e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, err := range chErrs {
|
|
|
|
if err != nil {
|
|
|
|
// not using Failf() as it aborts the test and does not log other errors
|
|
|
|
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
|
|
|
|
failed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed != 0 {
|
|
|
|
e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
|
|
|
|
}
|
|
|
|
// total images in cluster is 1 parent rbd image+ total
|
|
|
|
// snaps+ total clones
|
|
|
|
totalCloneCount := totalCount + totalCount + 1
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
wg.Add(totalCount)
|
|
|
|
// delete clone and app
|
|
|
|
for i := 0; i < totalCount; i++ {
|
2021-06-28 09:53:33 +00:00
|
|
|
go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
|
2021-04-08 15:46:11 +00:00
|
|
|
name := fmt.Sprintf("%s%d", f.UniqueName, n)
|
|
|
|
p.Spec.DataSource.Name = name
|
|
|
|
wgErrs[n] = deletePVCAndApp(name, f, &p, &a)
|
2021-06-28 09:53:33 +00:00
|
|
|
wg.Done()
|
|
|
|
}(i, *pvcClone, *appClone)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
for i, err := range wgErrs {
|
|
|
|
if err != nil {
|
|
|
|
// not using Failf() as it aborts the test and does not log other errors
|
|
|
|
e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
|
|
|
|
failed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed != 0 {
|
|
|
|
e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
|
|
|
|
}
|
|
|
|
|
|
|
|
// total images in cluster is 1 parent rbd image+ total
|
|
|
|
// snaps
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, totalCount+1, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
// create clones from different snapshots and bind it to an
|
|
|
|
// app
|
|
|
|
wg.Add(totalCount)
|
|
|
|
for i := 0; i < totalCount; i++ {
|
2021-06-28 09:53:33 +00:00
|
|
|
go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
|
2021-04-08 15:46:11 +00:00
|
|
|
name := fmt.Sprintf("%s%d", f.UniqueName, n)
|
|
|
|
p.Spec.DataSource.Name = name
|
|
|
|
wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
|
2021-10-27 08:22:50 +00:00
|
|
|
if wgErrs[n] == nil && dataPool != noDataPool {
|
|
|
|
wgErrs[n] = checkPVCDataPoolForImageInPool(f, &p, defaultRBDPool, dataPool)
|
|
|
|
}
|
|
|
|
|
2021-06-28 09:53:33 +00:00
|
|
|
wg.Done()
|
|
|
|
}(i, *pvcClone, *appClone)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
for i, err := range wgErrs {
|
|
|
|
if err != nil {
|
|
|
|
// not using Failf() as it aborts the test and does not log other errors
|
|
|
|
e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
|
|
|
|
failed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed != 0 {
|
|
|
|
e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed)
|
|
|
|
}
|
|
|
|
|
|
|
|
// total images in cluster is 1 parent rbd image+ total
|
|
|
|
// snaps+ total clones
|
|
|
|
totalCloneCount = totalCount + totalCount + 1
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
// delete parent pvc
|
|
|
|
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
|
|
|
if err != nil {
|
2021-11-22 06:48:18 +00:00
|
|
|
e2elog.Failf("failed to delete PVC: %v", err)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// total images in cluster is total snaps+ total clones
|
|
|
|
totalSnapCount := totalCount + totalCount
|
2021-03-19 09:09:28 +00:00
|
|
|
validateRBDImageCount(f, totalSnapCount, defaultRBDPool)
|
2021-04-08 15:46:11 +00:00
|
|
|
wg.Add(totalCount)
|
|
|
|
// delete snapshot
|
|
|
|
for i := 0; i < totalCount; i++ {
|
2021-06-28 09:53:33 +00:00
|
|
|
go func(n int, s snapapi.VolumeSnapshot) {
|
2021-04-08 15:46:11 +00:00
|
|
|
s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
|
2021-06-23 07:36:12 +00:00
|
|
|
content := &snapapi.VolumeSnapshotContent{}
|
2021-04-26 13:15:18 +00:00
|
|
|
var err error
|
2021-07-09 11:49:23 +00:00
|
|
|
if kms != noKMS {
|
|
|
|
if kms.canGetPassphrase() {
|
2021-04-26 13:15:18 +00:00
|
|
|
content, err = getVolumeSnapshotContent(s.Namespace, s.Name)
|
|
|
|
if err != nil {
|
2021-06-25 13:02:06 +00:00
|
|
|
wgErrs[n] = fmt.Errorf(
|
2021-11-22 06:48:18 +00:00
|
|
|
"failed to get snapshotcontent for %s in namespace %s: %w",
|
2021-06-25 13:02:06 +00:00
|
|
|
s.Name,
|
|
|
|
s.Namespace,
|
|
|
|
err)
|
2021-04-26 13:15:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if wgErrs[n] == nil {
|
|
|
|
wgErrs[n] = deleteSnapshot(&s, deployTimeout)
|
2021-07-09 11:49:23 +00:00
|
|
|
if wgErrs[n] == nil && kms != noKMS {
|
|
|
|
if kms.canGetPassphrase() {
|
2021-04-26 13:15:18 +00:00
|
|
|
// check passphrase deleted
|
2021-07-09 11:49:23 +00:00
|
|
|
stdOut, _ := kms.getPassphrase(f, *content.Status.SnapshotHandle)
|
2021-04-26 13:15:18 +00:00
|
|
|
if stdOut != "" {
|
|
|
|
wgErrs[n] = fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut)
|
|
|
|
}
|
|
|
|
}
|
2021-08-03 09:27:12 +00:00
|
|
|
if wgErrs[n] == nil && kms.canVerifyKeyDestroyed() {
|
|
|
|
destroyed, msg := kms.verifyKeyDestroyed(f, *content.Status.SnapshotHandle)
|
|
|
|
if !destroyed {
|
|
|
|
wgErrs[n] = fmt.Errorf("passphrased was not destroyed: %s", msg)
|
|
|
|
}
|
|
|
|
}
|
2021-04-26 13:15:18 +00:00
|
|
|
}
|
|
|
|
}
|
2021-06-28 09:53:33 +00:00
|
|
|
wg.Done()
|
|
|
|
}(i, snap)
|
2021-04-08 15:46:11 +00:00
|
|
|
}
	wg.Wait()

	for i, err := range wgErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
			failed++
		}
	}
	if failed != 0 {
		e2elog.Failf("deleting snapshots failed, %d errors were logged", failed)
	}

	validateRBDImageCount(f, totalCount, defaultRBDPool)
	wg.Add(totalCount)
	// delete clone and app
	for i := 0; i < totalCount; i++ {
		go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
			name := fmt.Sprintf("%s%d", f.UniqueName, n)
			p.Spec.DataSource.Name = name
			wgErrs[n] = deletePVCAndApp(name, f, &p, &a)
			wg.Done()
		}(i, *pvcClone, *appClone)
	}
	wg.Wait()

	for i, err := range wgErrs {
		if err != nil {
			// not using Failf() as it aborts the test and does not log other errors
			e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
			failed++
		}
	}
	if failed != 0 {
		e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
	}

	// validate created backend rbd images
	validateRBDImageCount(f, 0, defaultRBDPool)
}

// validateController simulates the operations required to validate the
// controller.
// The controller generates the omap data when the PV is created. For that we
// need to perform the operations below:
// Create a PVC with the Retain reclaim policy
// Store the PVC and PV Kubernetes objects so that we can create a static
// binding between PVC and PV
// Delete the omap data created for the PVC
// Create the static PVC and PV and let the controller regenerate the omap
// Mount the PVC in an application (NodeStage/NodePublish should work)
// Resize the PVC
// Delete the application and PVC.
func validateController(
	f *framework.Framework,
	pvcPath, appPath, scPath string,
	scOptions, scParams map[string]string,
) error {
	size := "1Gi"
	poolName := defaultRBDPool
	expandSize := "10Gi"
	var err error
	// create storageclass with retain
	err = createRBDStorageClass(f.ClientSet, f, defaultSCName, scOptions, scParams,
		retainPolicy)
	if err != nil {
		return fmt.Errorf("failed to create storageclass: %w", err)
	}

	// create pvc
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		return fmt.Errorf("failed to load PVC: %w", err)
	}
	resizePvc := pvc.DeepCopy()
	resizePvc.Namespace = f.UniqueName

	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
	pvc.Namespace = f.UniqueName
	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to create PVC: %w", err)
	}
	// get pvc and pv object
	pvc, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
	if err != nil {
		return fmt.Errorf("failed to get PVC: %w", err)
	}
	// recreate storageclass with delete policy
	err = deleteResource(scPath)
	if err != nil {
		return fmt.Errorf("failed to delete storageclass: %w", err)
	}
	err = createRBDStorageClass(f.ClientSet, f, defaultSCName, scOptions, scParams,
		deletePolicy)
	if err != nil {
		return fmt.Errorf("failed to create storageclass: %w", err)
	}
	// delete omap data
	err = deleteJournalInfoInPool(f, pvc, poolName)
	if err != nil {
		return err
	}
	// delete pvc and pv
	err = deletePVCAndPV(f.ClientSet, pvc, pv, deployTimeout)
	if err != nil {
		return fmt.Errorf("failed to delete PVC or PV: %w", err)
	}
	// create pvc and pv with application
	pv.Spec.ClaimRef = nil
	pv.Spec.PersistentVolumeReclaimPolicy = deletePolicy
	// unset the resource version as it should not be set on objects to be created
	pvc.ResourceVersion = ""
	pv.ResourceVersion = ""
	err = createPVCAndPV(f.ClientSet, pvc, pv)
	if err != nil {
		// return the error instead of Failf() so the caller can clean up
		return fmt.Errorf("failed to create PVC or PV: %w", err)
	}
	// bind PVC to application
	app, err := loadApp(appPath)
	if err != nil {
		return err
	}
	app.Labels = map[string]string{"app": "resize-pvc"}
	app.Namespace = f.UniqueName
	opt := metav1.ListOptions{
		LabelSelector: "app=resize-pvc",
	}
	err = createApp(f.ClientSet, app, deployTimeout)
	if err != nil {
		return err
	}
	if scParams["encrypted"] == strconv.FormatBool(true) {
		// check encryption
		err = isEncryptedPVC(f, resizePvc, app)
		if err != nil {
			return err
		}
	} else {
		// resize PVC
		err = expandPVCSize(f.ClientSet, resizePvc, expandSize, deployTimeout)
		if err != nil {
			return err
		}
		switch *pvc.Spec.VolumeMode {
		case v1.PersistentVolumeFilesystem:
			err = checkDirSize(app, f, &opt, expandSize)
			if err != nil {
				return err
			}
		case v1.PersistentVolumeBlock:
			err = checkDeviceSize(app, f, &opt, expandSize)
			if err != nil {
				return err
			}
		}
	}
	// delete the PVC, application and storageclass
	err = deletePVCAndApp("", f, resizePvc, app)
	if err != nil {
		return err
	}

	return deleteResource(rbdExamplePath + "storageclass.yaml")
}
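
// A hedged usage sketch (not part of any test): validate the controller with
// the example RBD manifests. The manifest paths reuse rbdExamplePath from this
// package and are assumptions, not fixed requirements.
//
//	err := validateController(f,
//		rbdExamplePath+"pvc.yaml", rbdExamplePath+"pod.yaml",
//		rbdExamplePath+"storageclass.yaml",
//		nil, nil)
//	if err != nil {
//		e2elog.Failf("failed to validate controller: %v", err)
//	}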

// k8sVersionGreaterEquals checks the ServerVersion of the Kubernetes cluster
// and compares it to the major.minor version passed. In case the version of
// the cluster is equal or higher to major.minor, `true` is returned, `false`
// otherwise.
//
// If fetching the ServerVersion of the Kubernetes cluster fails, the calling
// test case is marked as `FAILED` and gets aborted.
//
// nolint:unparam // currently major is always 1, this can change in the future
func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		e2elog.Failf("failed to get server version: %v", err)
		// Failf() marks the case as a failure and returns from the
		// Go-routine that runs the case. This function will not have a
		// return value.
	}

	maj := fmt.Sprintf("%d", major)
	min := fmt.Sprintf("%d", minor)

	return (v.Major > maj) || (v.Major == maj && v.Minor >= min)
}
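
// A minimal usage sketch: gate version-dependent assertions on the cluster
// version, for example only running them on Kubernetes v1.22 or newer.
//
//	if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
//		// exercise behaviour that needs v1.22+
//	}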

// waitForJobCompletion polls the status of the given job and waits until the
// job has succeeded or until the timeout is hit.
func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) error {
	t := time.Duration(timeout) * time.Minute
	start := time.Now()

	e2elog.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete)

	return wait.PollImmediate(poll, t, func() (bool, error) {
		j, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{})
		if err != nil {
			if isRetryableAPIError(err) {
				return false, nil
			}

			return false, fmt.Errorf("failed to get Job: %w", err)
		}

		if j.Status.CompletionTime != nil {
			// the Job has completed successfully
			return true, nil
		}

		e2elog.Logf(
			"Job %s/%s has not completed yet (%d seconds elapsed)",
			ns, job, int(time.Since(start).Seconds()))

		return false, nil
	})
}
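
// A hedged sketch, assuming a Job named "log-writer" was created in the test
// namespace and deployTimeout (in minutes) is an acceptable wait:
//
//	err := waitForJobCompletion(f.ClientSet, f.UniqueName, "log-writer", deployTimeout)
//	if err != nil {
//		e2elog.Failf("Job did not complete: %v", err)
//	}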

// kubectlAction is used to tell retryKubectlInput() what action needs to be
// done.
type kubectlAction string

const (
	// kubectlCreate tells retryKubectlInput() to run "create".
	kubectlCreate = kubectlAction("create")
	// kubectlDelete tells retryKubectlInput() to run "delete".
	kubectlDelete = kubectlAction("delete")
)

// String returns the string format of the kubectlAction, this is automatically
// used when formatting strings with %s or %q.
func (ka kubectlAction) String() string {
	return string(ka)
}
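
// Because of String(), a kubectlAction can be logged directly with %s or %q,
// for example (a sketch):
//
//	e2elog.Logf("running kubectl action %q", kubectlCreate) // prints: running kubectl action "create"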

// retryKubectlInput takes a namespace and an action telling kubectl what to
// do, then feeds the data through stdin to the process. This function retries
// until no error occurs, or the timeout is hit.
func retryKubectlInput(namespace string, action kubectlAction, data string, t int, args ...string) error {
	timeout := time.Duration(t) * time.Minute
	e2elog.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
	start := time.Now()

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// pass any extra arguments individually, before the action
		cmd := []string{}
		cmd = append(cmd, args...)
		cmd = append(cmd, []string{string(action), "-f", "-"}...)

		_, err := framework.RunKubectlInput(namespace, data, cmd...)
		if err != nil {
			if isRetryableAPIError(err) {
				return false, nil
			}
			if action == kubectlCreate && isAlreadyExistsCLIError(err) {
				return true, nil
			}
			if action == kubectlDelete && isNotFoundCLIError(err) {
				return true, nil
			}
			e2elog.Logf(
				"will run kubectl (%s) args (%s) again (%d seconds elapsed)",
				action,
				args,
				int(time.Since(start).Seconds()))

			return false, fmt.Errorf("failed to run kubectl: %w", err)
		}

		return true, nil
	})
}
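
// A hedged usage sketch, assuming "manifest" holds a YAML document to apply in
// the unique test namespace:
//
//	err := retryKubectlInput(f.UniqueName, kubectlCreate, manifest, deployTimeout)
//	if err != nil {
//		e2elog.Failf("failed to create resource: %v", err)
//	}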

// retryKubectlFile takes a namespace and an action telling kubectl what to do
// with the passed filename and arguments. This function retries until no error
// occurs, or the timeout is hit.
func retryKubectlFile(namespace string, action kubectlAction, filename string, t int, args ...string) error {
	timeout := time.Duration(t) * time.Minute
	e2elog.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args)
	start := time.Now()

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// pass any extra arguments individually, before the action
		cmd := []string{}
		cmd = append(cmd, args...)
		cmd = append(cmd, []string{string(action), "-f", filename}...)

		_, err := framework.RunKubectl(namespace, cmd...)
		if err != nil {
			if isRetryableAPIError(err) {
				return false, nil
			}
			if action == kubectlCreate && isAlreadyExistsCLIError(err) {
				return true, nil
			}
			if action == kubectlDelete && isNotFoundCLIError(err) {
				return true, nil
			}
			e2elog.Logf(
				"will run kubectl (%s -f %q args %s) again (%d seconds elapsed)",
				action,
				filename,
				args,
				int(time.Since(start).Seconds()))

			return false, fmt.Errorf("failed to run kubectl: %w", err)
		}

		return true, nil
	})
}
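
// A hedged sketch: delete the objects from an example manifest file; deleting
// already-removed objects is treated as success via isNotFoundCLIError().
//
//	err := retryKubectlFile(f.UniqueName, kubectlDelete, rbdExamplePath+"pvc.yaml", deployTimeout)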

// retryKubectlArgs takes a namespace and an action telling kubectl what to do
// with the passed arguments. This function retries until no error occurs, or
// the timeout is hit.
// nolint:unparam // retryKubectlArgs will be used with kubectlDelete arg later on.
func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
	timeout := time.Duration(t) * time.Minute
	args = append([]string{string(action)}, args...)
	e2elog.Logf("waiting for kubectl (%s args) to finish", args)
	start := time.Now()

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := framework.RunKubectl(namespace, args...)
		if err != nil {
			if isRetryableAPIError(err) {
				return false, nil
			}
			if action == kubectlCreate && isAlreadyExistsCLIError(err) {
				return true, nil
			}
			if action == kubectlDelete && isNotFoundCLIError(err) {
				return true, nil
			}
			e2elog.Logf(
				"will run kubectl (%s) again (%d seconds elapsed)",
				args,
				int(time.Since(start).Seconds()))

			return false, fmt.Errorf("failed to run kubectl: %w", err)
		}

		return true, nil
	})
}
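
// A hedged sketch with a hypothetical PVC name; the action is prepended to the
// arguments, so this runs "kubectl delete pvc rbd-pvc --ignore-not-found=true":
//
//	err := retryKubectlArgs(f.UniqueName, kubectlDelete, deployTimeout, "pvc", "rbd-pvc", "--ignore-not-found=true")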

// rwopSupported indicates that a test using RWOP is expected to succeed. If
// the accessMode is reported as invalid, rwopSupported will be set to false.
var rwopSupported = true

// rwopMayFail returns true if the accessMode is not valid. k8s v1.22 requires
// a feature gate, which might not be set. In case the accessMode is invalid,
// the feature gate is not set, and testing RWOP is not possible.
func rwopMayFail(err error) bool {
	if !rwopSupported {
		return true
	}

	if strings.Contains(err.Error(), `invalid: spec.accessModes: Unsupported value: "ReadWriteOncePod"`) {
		rwopSupported = false
	}

	return !rwopSupported
}
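
// A hedged usage sketch: tolerate a rejected ReadWriteOncePod PVC on clusters
// where the feature gate is disabled ("rwopPVC" is a hypothetical PVC object).
//
//	err := createPVCAndvalidatePV(f.ClientSet, rwopPVC, deployTimeout)
//	if err != nil && !rwopMayFail(err) {
//		e2elog.Failf("failed to create RWOP PVC: %v", err)
//	}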