mirror of https://github.com/ceph/ceph-csi.git

e2e: add tests to validate cluster name

Validate the cluster name metadata with both volumes and snapshots.

Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>

This commit is contained in:
parent c4de0854da
commit 8364493eb9

@@ -24,6 +24,7 @@ import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -297,3 +298,180 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {

	return nil
}

// waitForDeploymentUpdateScale waits for the scale subresource of the named
// deployment to be updated, retrying on transient API errors until the
// timeout (in minutes) expires.
func waitForDeploymentUpdateScale(
	c kubernetes.Interface,
	ns,
	deploymentName string,
	scale *autoscalingv1.Scale,
	timeout int,
) error {
	t := time.Duration(timeout) * time.Minute
	start := time.Now()
	err := wait.PollImmediate(poll, t, func() (bool, error) {
		scaleResult, upsErr := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(),
			deploymentName, scale, metav1.UpdateOptions{})
		if upsErr != nil {
			if isRetryableAPIError(upsErr) {
				return false, nil
			}
			e2elog.Logf(
				"Deployment UpdateScale %s/%s has not completed yet (%d seconds elapsed)",
				ns, deploymentName, int(time.Since(start).Seconds()))

			return false, fmt.Errorf("error updating scale of deployment %s/%s: %w", ns, deploymentName, upsErr)
		}
		if scaleResult.Spec.Replicas != scale.Spec.Replicas {
			e2elog.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
				ns, deploymentName, scale.Spec.Replicas, scaleResult.Spec.Replicas)

			return false, fmt.Errorf("scale of deployment %s/%s does not match the desired replica count", ns, deploymentName)
		}

		return true, nil
	})
	if err != nil {
		return fmt.Errorf("failed to update scale of deployment %s/%s: %w", ns, deploymentName, err)
	}

	return nil
}
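
A caller is expected to fetch the current Scale object, adjust the replica
count, and hand it to this helper. A minimal sketch, assuming the suite-level
names c, cephCSINamespace, rbdDeploymentName, and deployTimeout that the RBD
tests below use; this is an illustration, not part of the commit:

	// Scale the provisioner deployment down to zero and wait for the API
	// server to acknowledge the new replica count.
	scale, err := c.AppsV1().Deployments(cephCSINamespace).GetScale(
		context.TODO(), rbdDeploymentName, metav1.GetOptions{})
	if err != nil {
		e2elog.Failf("failed to get scale: %v", err)
	}
	scale.ResourceVersion = "" // make the scale update unconditional
	scale.Spec.Replicas = 0
	err = waitForDeploymentUpdateScale(c, cephCSINamespace, rbdDeploymentName, scale, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to scale down deployment: %v", err)
	}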

// waitForDeploymentUpdate updates the given deployment, retrying on
// transient API errors until the timeout (in minutes) expires.
func waitForDeploymentUpdate(
	c kubernetes.Interface,
	deployment *appsv1.Deployment,
	timeout int,
) error {
	t := time.Duration(timeout) * time.Minute
	start := time.Now()
	err := wait.PollImmediate(poll, t, func() (bool, error) {
		_, upErr := c.AppsV1().Deployments(deployment.Namespace).Update(
			context.TODO(), deployment, metav1.UpdateOptions{})
		if upErr != nil {
			if isRetryableAPIError(upErr) {
				return false, nil
			}
			e2elog.Logf(
				"Deployment Update %s/%s has not completed yet (%d seconds elapsed)",
				deployment.Namespace, deployment.Name, int(time.Since(start).Seconds()))

			return false, fmt.Errorf("error updating deployment %s/%s: %w",
				deployment.Namespace, deployment.Name, upErr)
		}

		return true, nil
	})
	if err != nil {
		return fmt.Errorf("failed to update deployment %s/%s: %w", deployment.Namespace, deployment.Name, err)
	}

	return nil
}
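
Callers are expected to strip the server-populated object metadata before
submitting the update, as waitForContainersArgsUpdate below does. A minimal
sketch, assuming a deployment fetched with a prior Get and the same
suite-level names as above; illustration only:

	deployment, err := c.AppsV1().Deployments(cephCSINamespace).Get(
		context.TODO(), rbdDeploymentName, metav1.GetOptions{})
	if err != nil {
		e2elog.Failf("failed to get deployment: %v", err)
	}
	// Clear creationTimestamp, generation, resourceVersion, and uid so the
	// modified object is accepted as an unconditional update.
	deployment.CreationTimestamp = metav1.Time{}
	deployment.Generation = 0
	deployment.ResourceVersion = "0"
	deployment.UID = ""
	err = waitForDeploymentUpdate(c, deployment, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to update deployment: %v", err)
	}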

// contains checks whether the slice contains the given string.
func contains(s []string, e string) bool {
	for _, a := range s {
		if a == e {
			return true
		}
	}

	return false
}
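
As an aside, on Go 1.21 and newer the standard library offers an equivalent
generic helper; a sketch, assuming the project's toolchain permits it:

	package main

	import (
		"fmt"
		"slices" // standard library since Go 1.21
	)

	func main() {
		containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
		// Equivalent to the contains helper above.
		fmt.Println(slices.Contains(containers, "csi-rbdplugin")) // true
	}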

// waitForContainersArgsUpdate sets the --key=value argument in the named
// containers of the deployment, scaling it down to zero first and back up to
// its original replica count afterwards.
func waitForContainersArgsUpdate(
	c kubernetes.Interface,
	ns,
	deploymentName,
	key,
	value string,
	containers []string,
	timeout int,
) error {
	e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)

	// Scale down to 0.
	scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("error getting scale of deployment %s/%s: %w", ns, deploymentName, err)
	}
	count := scale.Spec.Replicas
	scale.ResourceVersion = "" // indicate the scale update should be unconditional
	scale.Spec.Replicas = 0
	err = waitForDeploymentUpdateScale(c, ns, deploymentName, scale, timeout)
	if err != nil {
		return err
	}

	// Update deployment.
	deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("error getting deployment %s/%s: %w", ns, deploymentName, err)
	}
	cid := deployment.Spec.Template.Spec.Containers // cid: read as containers in deployment
	for i := range cid {
		if contains(containers, cid[i].Name) {
			match := false
			for j, ak := range cid[i].Args {
				if ak == key {
					// do replacement of value
					match = true
					cid[i].Args[j] = fmt.Sprintf("--%s=%s", key, value)

					break
				}
			}
			if !match {
				// append a new key=value argument
				cid[i].Args = append(cid[i].Args, fmt.Sprintf("--%s=%s", key, value))
			}
			deployment.Spec.Template.Spec.Containers[i].Args = cid[i].Args
		}
	}
	// clear creationTimestamp, generation, resourceVersion, and uid
	deployment.CreationTimestamp = metav1.Time{}
	deployment.Generation = 0
	deployment.ResourceVersion = "0"
	deployment.UID = ""
	err = waitForDeploymentUpdate(c, deployment, timeout)
	if err != nil {
		return err
	}

	// Scale up to count.
	scale.Spec.Replicas = count
	err = waitForDeploymentUpdateScale(c, ns, deploymentName, scale, timeout)
	if err != nil {
		return err
	}

	// wait for the deployment to report count replicas
	t := time.Duration(timeout) * time.Minute
	start := time.Now()
	err = wait.PollImmediate(poll, t, func() (bool, error) {
		deploy, getErr := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
		if getErr != nil {
			if isRetryableAPIError(getErr) {
				return false, nil
			}
			e2elog.Logf(
				"Deployment Get %s/%s has not completed yet (%d seconds elapsed)",
				ns, deploymentName, int(time.Since(start).Seconds()))

			return false, fmt.Errorf("error getting deployment %s/%s: %w", ns, deploymentName, getErr)
		}
		if deploy.Status.Replicas != count {
			e2elog.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)

			return false, fmt.Errorf("expected deployment %s/%s replicas %d, got %d",
				ns, deploymentName, count, deploy.Status.Replicas)
		}

		return true, nil
	})
	if err != nil {
		return fmt.Errorf("failed to get deployment %s/%s: %w", ns, deploymentName, err)
	}

	return nil
}
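
Putting the helpers together, the RBD tests below follow this pattern: inject
the clustername flag into the plugin deployments, provision storage, then
assert on the image metadata. A condensed sketch built from the calls in the
hunks that follow (PVC/snapshot creation and image lookup elided):

	containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
	err := waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
		"clustername", defaultClusterName, containers, deployTimeout)
	if err != nil {
		e2elog.Failf("timeout waiting for deployment update: %v", err)
	}
	// ... create a PVC or snapshot and resolve its backing RBD image ...
	checkClusternameInMetadata(f, rookNamespace, defaultRBDPool, imageList[0])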

e2e/rbd.go (42 changed lines)

@@ -211,6 +211,37 @@ func checkGetKeyError(err error, stdErr string) bool {

	return false
}

// checkClusternameInMetadata checks for the cluster name metadata on the RBD image.
// nolint:nilerr // intentionally returning nil on error in the retry loop.
func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) {
	t := time.Duration(deployTimeout) * time.Minute
	var (
		coName  string
		stdErr  string
		execErr error
	)
	err := wait.PollImmediate(poll, t, func() (bool, error) {
		coName, stdErr, execErr = execCommandInToolBoxPod(f,
			fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(pool), image, clusterNameKey),
			ns)
		if execErr != nil || stdErr != "" {
			e2elog.Logf("failed to get cluster name %s/%s %s: err=%v stdErr=%q",
				rbdOptions(pool), image, clusterNameKey, execErr, stdErr)

			return false, nil
		}

		return true, nil
	})
	if err != nil {
		e2elog.Failf("could not get cluster name %s/%s %s: %v", rbdOptions(pool), image, clusterNameKey, err)
	}
	coName = strings.TrimSuffix(coName, "\n")
	if coName != defaultClusterName {
		e2elog.Failf("expected coName %q got %q", defaultClusterName, coName)
	}
}

var _ = Describe("RBD", func() {
	f := framework.NewDefaultFramework(rbdType)
	var c clientset.Interface

@@ -290,6 +321,14 @@ var _ = Describe("RBD", func() {
		if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) {
			nbdMapOptions = "nbd:debug-rbd=20,io-timeout=330"
		}

		// wait for the cluster name update in the deployment
		containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
		err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
			"clustername", defaultClusterName, containers, deployTimeout)
		if err != nil {
			e2elog.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
		}
	})

	AfterEach(func() {

@@ -453,6 +492,8 @@ var _ = Describe("RBD", func() {
				e2elog.Failf("expected pvName %q got %q", pvcObj.Spec.VolumeName, pvName)
			}

			checkClusternameInMetadata(f, rookNamespace, defaultRBDPool, imageList[0])

			err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
			if err != nil {
				e2elog.Failf("failed to delete pvc: %v", err)

@@ -701,6 +742,7 @@ var _ = Describe("RBD", func() {
				e2elog.Failf("PV name found on %s/%s %s=%s: err=%v stdErr=%q",
					rbdOptions(defaultRBDPool), imageList[0], pvNameKey, pvName, err, stdErr)
			}
			checkClusternameInMetadata(f, rookNamespace, defaultRBDPool, imageList[0])

			err = deleteSnapshot(&snap, deployTimeout)
			if err != nil {

@@ -70,6 +70,10 @@ const (
	rbdPodLabels = "app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)"

	exitOneErr = "command terminated with exit code 1"

	// cluster name, set by the user.
	clusterNameKey     = "csi.ceph.com/cluster/name"
	defaultClusterName = "k8s-cluster-1"
)

var (