Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-03-06 07:29:29 +00:00
e2e: handle ceph-csi-operator deployment changes

This commit adds e2e/operator.go containing utility methods specific to the operator.

Signed-off-by: Praveen M <m.praveen@ibm.com>

parent 1d08481ee4
commit 15a9d3c030
e2e/cephfs.go

@@ -46,6 +46,14 @@ var (
 	subvolumegroup     = "e2e"
 	fileSystemName     = "myfs"
 	fileSystemPoolName = "myfs-replicated"
+
+	helmCephFSPodsLabel = "ceph-csi-cephfs"
+
+	operatorCephFSDeploymentName = "cephfs.csi.ceph.com-ctrlplugin"
+	operatorCephFSDaemonsetName  = "cephfs.csi.ceph.com-nodeplugin"
+	cephFSPodSelector            = fmt.Sprintf("app in (%s, %s, %s, %s, %s)",
+		helmCephFSPodsLabel, cephFSDeploymentName, cephFSDeamonSetName,
+		operatorCephFSDeploymentName, operatorCephFSDaemonsetName)
 )

 func deployCephfsPlugin() {
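Note (illustrative, not part of the commit): cephFSPodSelector folds the helm label and both naming schemes, classic and operator-managed, into one set-based label selector, so cleanup code further down no longer cares how the driver was deployed. Assuming the long-standing defaults cephFSDeploymentName = "csi-cephfsplugin-provisioner" and cephFSDeamonSetName = "csi-cephfsplugin" (the literals the logging hunks below stop hard-coding), the selector renders like this minimal standalone sketch:

package main

import "fmt"

func main() {
	// Values mirrored from the e2e variables; the two classic names are
	// assumptions taken from the hard-coded strings removed elsewhere in this diff.
	helmCephFSPodsLabel := "ceph-csi-cephfs"
	cephFSDeploymentName := "csi-cephfsplugin-provisioner"
	cephFSDeamonSetName := "csi-cephfsplugin"
	operatorCephFSDeploymentName := "cephfs.csi.ceph.com-ctrlplugin"
	operatorCephFSDaemonsetName := "cephfs.csi.ceph.com-nodeplugin"

	// Prints the rendered set-based selector:
	// app in (ceph-csi-cephfs, csi-cephfsplugin-provisioner, csi-cephfsplugin, cephfs.csi.ceph.com-ctrlplugin, cephfs.csi.ceph.com-nodeplugin)
	fmt.Printf("app in (%s, %s, %s, %s, %s)\n",
		helmCephFSPodsLabel, cephFSDeploymentName, cephFSDeamonSetName,
		operatorCephFSDeploymentName, operatorCephFSDaemonsetName)
}

The same pattern is added for RBD as rbdPodSelector further down; both feed recreateCSIPods and deletePodWithLabel.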
@@ -175,13 +183,19 @@ var _ = Describe(cephfsType, func() {
 			Skip("Skipping CephFS E2E")
 		}
 		c = f.ClientSet
-		if deployCephFS {
-			if cephCSINamespace != defaultNs {
+		if operatorDeployment {
+			cephFSDeploymentName = operatorCephFSDeploymentName
+			cephFSDeamonSetName = operatorCephFSDaemonsetName
+		}
+		// No need to create the namespace if ceph-csi is deployed via helm or operator.
+		if cephCSINamespace != defaultNs && (!helmTest && !operatorDeployment) {
 			err := createNamespace(c, cephCSINamespace)
 			if err != nil {
 				framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
 			}
 		}
+
+		if deployCephFS {
 			deployCephfsPlugin()
 		}
 		err := createConfigMap(cephFSDirPath, f.ClientSet, f)
@@ -209,11 +223,15 @@ var _ = Describe(cephfsType, func() {
 		deployVault(f.ClientSet, deployTimeout)

 		// wait for cluster name update in deployment
-		containers := []string{cephFSContainerName}
-		err = waitForContainersArgsUpdate(c, cephCSINamespace, cephFSDeploymentName,
-			"clustername", defaultClusterName, containers, deployTimeout)
+		if operatorDeployment {
+			err = setClusterName(defaultClusterName)
+		} else {
+			containers := []string{cephFSContainerName}
+			err = waitForContainersArgsUpdate(c, cephCSINamespace, cephFSDeploymentName,
+				"clustername", defaultClusterName, containers, deployTimeout)
+		}
 		if err != nil {
-			framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, cephFSDeploymentName, err)
+			framework.Failf("timeout waiting for clustername arg update %s/%s: %v", cephCSINamespace, cephFSDeploymentName, err)
 		}

 		err = createSubvolumegroup(f, fileSystemName, subvolumegroup)
@@ -226,13 +244,14 @@ var _ = Describe(cephfsType, func() {
 		if !testCephFS || upgradeTesting {
 			Skip("Skipping CephFS E2E")
 		}

 		if CurrentSpecReport().Failed() {
 			// log pods created by helm chart
-			logsCSIPods("app=ceph-csi-cephfs", c)
+			logsCSIPods("app="+helmCephFSPodsLabel, c)
 			// log provisioner
-			logsCSIPods("app=csi-cephfsplugin-provisioner", c)
+			logsCSIPods("app="+cephFSDeploymentName, c)
 			// log node plugin
-			logsCSIPods("app=csi-cephfsplugin", c)
+			logsCSIPods("app="+cephFSDeamonSetName, c)

 			// log all details from the namespace where Ceph-CSI is deployed
 			e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
@@ -266,13 +285,14 @@ var _ = Describe(cephfsType, func() {

 		if deployCephFS {
 			deleteCephfsPlugin()
-			if cephCSINamespace != defaultNs {
+		}
+		// No need to delete the namespace if ceph-csi is deployed via helm or operator.
+		if cephCSINamespace != defaultNs && (!helmTest && !operatorDeployment) {
 			err = deleteNamespace(c, cephCSINamespace)
 			if err != nil {
 				framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
 			}
 		}
 	})

 	Context("Test CephFS CSI", func() {
@@ -2506,20 +2526,10 @@ var _ = Describe(cephfsType, func() {
 				framework.Failf("failed to create configmap: %v", err)
 			}

-			// delete csi pods
-			err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
-				cephCSINamespace, false)
+			// restart csi pods for the configmap to take effect.
+			err = recreateCSIPods(f, cephFSPodSelector, cephFSDeamonSetName, cephFSDeploymentName)
 			if err != nil {
-				framework.Failf("failed to delete pods with labels: %v", err)
-			}
-			// wait for csi pods to come up
-			err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
-			if err != nil {
-				framework.Failf("timeout waiting for daemonset pods: %v", err)
-			}
-			err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
-			if err != nil {
-				framework.Failf("timeout waiting for deployment pods: %v", err)
+				framework.Failf("failed to recreate cephfs csi pods: %v", err)
 			}
 		}
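recreateCSIPods itself is not shown in this diff; judging from the inline code it replaces here (and in the rbd.go hunk further down), a plausible sketch is the following — the body is reconstructed, not quoted from the repository:

// recreateCSIPods deletes the CSI pods matching the selector, then waits for
// the node-plugin daemonset and the provisioner deployment to become ready again.
func recreateCSIPods(f *framework.Framework, podSelector, daemonsetName, deploymentName string) error {
	// delete all pods covered by the selector (helm, classic, or operator names)
	err := deletePodWithLabel(podSelector, cephCSINamespace, false)
	if err != nil {
		return fmt.Errorf("failed to delete pods with selector %q: %w", podSelector, err)
	}
	// wait for the daemonset pods to come up
	err = waitForDaemonSets(daemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
	if err != nil {
		return fmt.Errorf("timeout waiting for daemonset %s: %w", daemonsetName, err)
	}
	// wait for the deployment pods to come up
	err = waitForDeploymentComplete(f.ClientSet, deploymentName, cephCSINamespace, deployTimeout)
	if err != nil {
		return fmt.Errorf("timeout waiting for deployment %s: %w", deploymentName, err)
	}

	return nil
}

Because the selector is now a variable (cephFSPodSelector / rbdPodSelector), the same helper restarts helm-, classic-, and operator-deployed pods alike.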
@@ -93,6 +93,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
 	}
+	if apierrs.IsNotFound(err) {
 		_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
 		if err != nil {
 			return err
 		}
+	}

 	return err
@@ -92,4 +92,8 @@ func handleFlags() {
 		testNFS = testCephFS
 		deployNFS = deployCephFS
 	}
+
+	if operatorDeployment {
+		cephCSINamespace = "ceph-csi-operator-system"
+	}
 }
@@ -63,7 +63,7 @@ func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes
 		return fmt.Errorf("failed to create configmap: %w", err)
 	}
 	// restart csi pods for the configmap to take effect.
-	err = recreateCSIPods(f, rbdPodLabels, rbdDaemonsetName, rbdDeploymentName)
+	err = recreateCSIPods(f, rbdPodSelector, rbdDaemonsetName, rbdDeploymentName)
 	if err != nil {
 		return fmt.Errorf("failed to recreate rbd csi pods: %w", err)
 	}
e2e/nfs.go
@@ -50,9 +50,12 @@ var (

 	// FIXME: some tests change the subvolumegroup to "e2e".
 	defaultSubvolumegroup = "csi"
+
+	operatorNFSDeploymentName = "nfs.csi.ceph.com-ctrlplugin"
+	operatorNFSDaemonsetName  = "nfs.csi.ceph.com-nodeplugin"
 )

-func deployNFSPlugin(f *framework.Framework) {
+func deployNFSPlugin() {
 	// delete objects deployed by rook

 	err := deleteResource(nfsDirPath + nfsProvisionerRBAC)
@@ -65,13 +68,6 @@ func deployNFSPlugin(f *framework.Framework) {
 		framework.Failf("failed to delete nodeplugin rbac %s: %v", nfsDirPath+nfsNodePluginRBAC, err)
 	}

-	// the pool should not be deleted, as it may contain configurations
-	// from non-e2e related CephNFS objects
-	err = createPool(f, nfsPoolName)
-	if err != nil {
-		framework.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
-	}
-
 	createORDeleteNFSResources(kubectlCreate)
 }
@@ -79,6 +75,30 @@ func deleteNFSPlugin() {
 	createORDeleteNFSResources(kubectlDelete)
 }

+func createNFSPool(f *framework.Framework) {
+	// the pool should not be deleted, as it may contain configurations
+	// from non-e2e related CephNFS objects
+	err := createPool(f, nfsPoolName)
+	if err != nil {
+		framework.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
+	}
+
+	resources := []ResourceDeployer{
+		// NFS server deployment
+		&yamlResourceNamespaced{
+			filename:  nfsExamplePath + nfsRookCephNFS,
+			namespace: rookNamespace,
+		},
+	}
+
+	for _, r := range resources {
+		err := r.Do(kubectlCreate)
+		if err != nil {
+			framework.Failf("failed to %s resource: %v", kubectlCreate, err)
+		}
+	}
+}
+
 func createORDeleteNFSResources(action kubectlAction) {
 	cephConfigFile := getConfigFile(cephConfconfigMap, deployPath, examplePath)
 	resources := []ResourceDeployer{
@@ -242,14 +262,21 @@ var _ = Describe("nfs", func() {
 			Skip("Skipping NFS E2E")
 		}
 		c = f.ClientSet
-		if deployNFS {
-			if cephCSINamespace != defaultNs {
+		if operatorDeployment {
+			nfsDeploymentName = operatorNFSDeploymentName
+			nfsDeamonSetName = operatorNFSDaemonsetName
+		}
+		// No need to create the namespace if ceph-csi is deployed via operator.
+		if cephCSINamespace != defaultNs && !operatorDeployment {
 			err := createNamespace(c, cephCSINamespace)
 			if err != nil {
 				framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
 			}
 		}
-			deployNFSPlugin(f)
+
+		createNFSPool(f)
+		if deployNFS {
+			deployNFSPlugin()
+		}

 		// cephfs testing might have changed the default subvolumegroup
@@ -287,13 +314,14 @@ var _ = Describe("nfs", func() {
 		if !testNFS || upgradeTesting {
 			Skip("Skipping NFS E2E")
 		}

 		if CurrentSpecReport().Failed() {
 			// log pods created by helm chart
 			logsCSIPods("app=ceph-csi-nfs", c)
 			// log provisioner
-			logsCSIPods("app=csi-nfsplugin-provisioner", c)
+			logsCSIPods("app="+nfsDeploymentName, c)
 			// log node plugin
-			logsCSIPods("app=csi-nfsplugin", c)
+			logsCSIPods("app="+nfsDeamonSetName, c)

 			// log all details from the namespace where Ceph-CSI is deployed
 			e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
@@ -325,13 +353,14 @@ var _ = Describe("nfs", func() {

 		if deployNFS {
 			deleteNFSPlugin()
-			if cephCSINamespace != defaultNs {
+		}
+		// No need to delete the namespace if ceph-csi is deployed via operator.
+		if cephCSINamespace != defaultNs && !operatorDeployment {
 			err = deleteNamespace(c, cephCSINamespace)
 			if err != nil {
 				framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
 			}
 		}
 	})

 	Context("Test NFS CSI", func() {
e2e/operator.go (new file, 93 lines)
@@ -0,0 +1,93 @@
/*
Copyright 2024 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"encoding/json"
	"fmt"
)

const (
	OperatorConfigName = "ceph-csi-operator-config"
)

func setEnableMetadata(value bool) error {
	command := []string{
		"operatorconfigs.csi.ceph.io",
		OperatorConfigName,
		"--type=merge",
		"-p",
		fmt.Sprintf(`{"spec": {"driverSpecDefaults": {"enableMetadata": %t}}}`, value),
	}

	// Patch the operator config
	err := retryKubectlArgs(cephCSINamespace, kubectlPatch, deployTimeout, command...)
	if err != nil {
		return err
	}

	return nil
}

func setClusterName(value string) error {
	command := []string{
		"operatorconfigs.csi.ceph.io",
		OperatorConfigName,
		"--type=merge",
		"-p",
		fmt.Sprintf(`{"spec": {"driverSpecDefaults": {"clusterName": %q}}}`, value),
	}

	// Patch the operator config
	err := retryKubectlArgs(cephCSINamespace, kubectlPatch, deployTimeout, command...)
	if err != nil {
		return fmt.Errorf("failed to set cluster name: %w", err)
	}

	return nil
}

func setDomainLabels(labels []string) error {
	// Define the patch operations
	patchOps := []map[string]interface{}{
		{"op": "add", "path": "/spec/driverSpecDefaults/nodePlugin", "value": map[string]interface{}{}},
		{"op": "add", "path": "/spec/driverSpecDefaults/nodePlugin/topology", "value": map[string]interface{}{}},
		{"op": "add", "path": "/spec/driverSpecDefaults/nodePlugin/topology/domainLabels", "value": labels},
	}

	// Serialize to JSON
	patchJSON, err := json.Marshal(patchOps)
	if err != nil {
		return fmt.Errorf("failed to marshal patch JSON: %w", err)
	}

	command := []string{
		"operatorconfigs.csi.ceph.io",
		OperatorConfigName,
		"--type=json",
		"-p",
		string(patchJSON),
	}

	// Patch the operator config
	err = retryKubectlArgs(cephCSINamespace, kubectlPatch, deployTimeout, command...)
	if err != nil {
		return fmt.Errorf("failed to set domain labels: %w", err)
	}

	return nil
}
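For context (illustrative, not part of the diff): all three helpers shell out to kubectl patch against the singleton OperatorConfig CR, so operator-managed drivers are reconfigured declaratively instead of the tests waiting for container arguments to change. setEnableMetadata and setClusterName use a merge patch for scalar fields, while setDomainLabels needs a JSON patch (--type=json) so it can create the intermediate nodePlugin and topology objects before adding domainLabels. A sketch of how the suites below combine them when operatorDeployment is set:

// Illustrative wiring only; each call mirrors a real call site in this diff,
// but this exact block does not appear in the commit.
if operatorDeployment {
	// rbd.go BeforeEach: topology domain labels for the node plugins
	if err := setDomainLabels([]string{nodeRegionLabel, nodeZoneLabel}); err != nil {
		framework.Failf("failed to set domain labels: %v", err)
	}
	// cephfs.go/rbd.go: replaces waiting for the "clustername" container arg
	if err := setClusterName(defaultClusterName); err != nil {
		framework.Failf("failed to set cluster name: %v", err)
	}
	// rbd.go metadata tests: replaces waiting for the "setmetadata" container arg
	if err := setEnableMetadata(false); err != nil {
		framework.Failf("failed to update enableMetadata: %v", err)
	}
}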
@@ -433,7 +433,6 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
 	})
 }

-//nolint:unparam // currently skipNotFound is always false, this can change in the future
 func deletePodWithLabel(label, ns string, skipNotFound bool) error {
 	err := retryKubectlArgs(
 		ns,
e2e/rbd.go
@@ -109,6 +109,13 @@ var (
 	volSnapNameKey        = "csi.storage.k8s.io/volumesnapshot/name"
 	volSnapNamespaceKey   = "csi.storage.k8s.io/volumesnapshot/namespace"
 	volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name"
+
+	helmRBDPodsLabel = "ceph-csi-rbd"
+
+	operatorRBDDeploymentName = "rbd.csi.ceph.com-ctrlplugin"
+	operatorRBDDaemonsetName  = "rbd.csi.ceph.com-nodeplugin"
+	rbdPodSelector            = fmt.Sprintf("app in (%s, %s, %s, %s, %s)",
+		helmRBDPodsLabel, rbdDeploymentName, rbdDaemonsetName, operatorRBDDeploymentName, operatorRBDDaemonsetName)
 )

 func deployRBDPlugin() {
@@ -290,7 +297,24 @@ var _ = Describe("RBD", func() {
 			Skip("Skipping RBD E2E")
 		}
 		c = f.ClientSet
 		if deployRBD {
+			if operatorDeployment {
+				rbdDeploymentName = operatorRBDDeploymentName
+				rbdDaemonsetName = operatorRBDDaemonsetName
+
+				err := setDomainLabels([]string{nodeRegionLabel, nodeZoneLabel})
+				if err != nil {
+					framework.Failf("failed to set domain labels: %v", err)
+				}
+			}
+			// No need to create the namespace if ceph-csi is deployed via helm or operator.
+			if cephCSINamespace != defaultNs && (!helmTest && !operatorDeployment) {
+				err := createNamespace(c, cephCSINamespace)
+				if err != nil {
+					framework.Failf("failed to create namespace: %v", err)
+				}
+			}
 			// helm script already adds node labels
 			if !helmTest {
 				err := addLabelsToNodes(f, map[string]string{
 					nodeRegionLabel: regionValue,
 					nodeZoneLabel:   zoneValue,
@@ -300,12 +324,8 @@ var _ = Describe("RBD", func() {
 				if err != nil {
 					framework.Failf("failed to add node labels: %v", err)
 				}
 			}
-			if cephCSINamespace != defaultNs {
-				err = createNamespace(c, cephCSINamespace)
-				if err != nil {
-					framework.Failf("failed to create namespace: %v", err)
-				}
-			}
 			if deployRBD {
 				deployRBDPlugin()
 			}
 			err := createConfigMap(rbdDirPath, f.ClientSet, f)
@@ -362,11 +382,15 @@ var _ = Describe("RBD", func() {
 		}

 		// wait for cluster name update in deployment
-		containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
-		err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
-			"clustername", defaultClusterName, containers, deployTimeout)
+		if operatorDeployment {
+			err = setClusterName(defaultClusterName)
+		} else {
+			containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
+			err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
+				"clustername", defaultClusterName, containers, deployTimeout)
+		}
 		if err != nil {
-			framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
+			framework.Failf("timeout waiting for clustername arg update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
 		}
 	})
@@ -374,13 +398,14 @@ var _ = Describe("RBD", func() {
 		if !testRBD || upgradeTesting {
 			Skip("Skipping RBD E2E")
 		}

 		if CurrentSpecReport().Failed() {
 			// log pods created by helm chart
-			logsCSIPods("app=ceph-csi-rbd", c)
+			logsCSIPods("app="+helmRBDPodsLabel, c)
 			// log provisioner
-			logsCSIPods("app=csi-rbdplugin-provisioner", c)
+			logsCSIPods("app="+rbdDeploymentName, c)
 			// log node plugin
-			logsCSIPods("app=csi-rbdplugin", c)
+			logsCSIPods("app="+rbdDaemonsetName, c)

 			// log all details from the namespace where Ceph-CSI is deployed
 			e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
@@ -410,13 +435,14 @@ var _ = Describe("RBD", func() {
 		deleteVault()
 		if deployRBD {
 			deleteRBDPlugin()
-			if cephCSINamespace != defaultNs {
+		}
+		// No need to delete the namespace if ceph-csi is deployed via helm or operator.
+		if cephCSINamespace != defaultNs && (!operatorDeployment && !helmTest) {
 			err = deleteNamespace(c, cephCSINamespace)
 			if err != nil {
 				framework.Failf("failed to delete namespace: %v", err)
 			}
 		}
 		err = deleteNodeLabels(c, []string{
 			nodeRegionLabel,
 			nodeZoneLabel,
@@ -2832,7 +2858,11 @@ var _ = Describe("RBD", func() {
 				validateRBDImageCount(f, 1, defaultRBDPool)
 				validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
 				// delete rbd nodeplugin pods
-				err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false)
+				selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName)
+				if err != nil {
+					framework.Failf("failed to get the labels: %v", err)
+				}
+				err = deletePodWithLabel(selector, cephCSINamespace, false)
 				if err != nil {
 					framework.Failf("fail to delete pod: %v", err)
 				}
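getDaemonSetLabelSelector is likewise not part of this diff; the point of the change above is to stop hard-coding "app=csi-rbdplugin" and instead ask the daemonset for its own selector, which works for both the classic and the operator-managed object names. A plausible client-go sketch (the name matches the call above, the body is assumed):

// getDaemonSetLabelSelector returns the daemonset's spec.selector rendered as
// a label-selector string usable with deletePodWithLabel.
func getDaemonSetLabelSelector(f *framework.Framework, ns, name string) (string, error) {
	ds, err := f.ClientSet.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to get daemonset %s/%s: %w", ns, name, err)
	}
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		return "", fmt.Errorf("failed to convert label selector: %w", err)
	}

	return selector.String(), nil
}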
@@ -3911,20 +3941,10 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				framework.Failf("failed to create rados namespace: %v", err)
 			}
-			// delete csi pods
-			err = deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)",
-				cephCSINamespace, false)
+			// restart csi pods for the configmap to take effect.
+			err = recreateCSIPods(f, rbdPodSelector, rbdDaemonsetName, rbdDeploymentName)
 			if err != nil {
-				framework.Failf("failed to delete pods with labels: %v", err)
-			}
-			// wait for csi pods to come up
-			err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
-			if err != nil {
-				framework.Failf("timeout waiting for daemonset pods: %v", err)
-			}
-			err = waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
-			if err != nil {
-				framework.Failf("timeout waiting for deployment to be in running state: %v", err)
+				framework.Failf("failed to recreate rbd csi pods: %v", err)
 			}
 		}
@@ -5010,10 +5030,14 @@ var _ = Describe("RBD", func() {

 				// wait for cluster name update in deployment
 				containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
+				if operatorDeployment {
+					err = setEnableMetadata(false)
+				} else {
 					err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
 						"setmetadata", "false", containers, deployTimeout)
+				}
 				if err != nil {
-					framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
+					framework.Failf("failed to update setmetadata arg in %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
 				}
 				pvcSmartClone, err := loadPVC(pvcSmartClonePath)
 				if err != nil {
@@ -5113,11 +5137,15 @@ var _ = Describe("RBD", func() {
 				validateRBDImageCount(f, 0, defaultRBDPool)
 				validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
 				validateOmapCount(f, 0, rbdType, defaultRBDPool, snapsType)
+				if operatorDeployment {
+					err = setEnableMetadata(true)
+				} else {
 					// wait for cluster name update in deployment
 					err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
 						"setmetadata", "true", containers, deployTimeout)
+				}
 				if err != nil {
-					framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
+					framework.Failf("failed to update setmetadata arg in %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
 				}
 			})
@@ -68,9 +68,6 @@ const (
 	appCloneLabel = "app-clone"

 	noError = ""
-	// labels/selector used to list/delete rbd pods.
-	rbdPodLabels = "app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)"

 	exitOneErr = "command terminated with exit code 1"

 	// cluster Name, set by user.
@@ -1634,6 +1631,8 @@ const (
 	kubectlCreate = kubectlAction("create")
 	// kubectlDelete tells retryKubectlInput() to run "delete".
 	kubectlDelete = kubectlAction("delete")
+	// kubectlPatch tells retryKubectlInput() to run "patch".
+	kubectlPatch = kubectlAction("patch")
 )

 // String returns the string format of the kubectlAction, this is automatically
@@ -1724,8 +1723,6 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 // retryKubectlArgs takes a namespace and action telling kubectl what to do
 // with the passed arguments. This function retries until no error occurred, or
 // the timeout passed.
-//
-//nolint:unparam // retryKubectlArgs will be used with kubectlDelete arg later on.
 func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
 	timeout := time.Duration(t) * time.Minute
 	args = append([]string{string(action)}, args...)
@@ -1749,7 +1746,7 @@ func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...str
 			args,
 			int(time.Since(start).Seconds()))

-		return false, fmt.Errorf("failed to run kubectl: %w", err)
+		return false, fmt.Errorf("failed to run kubectl: %v, error: %w", args, err)
 	}

 	return true, nil
@@ -169,7 +169,7 @@ spec:
         - name: PLUGIN_ROLE
           value: csi-kubernetes
         - name: SERVICE_ACCOUNTS
-          value: rbd-csi-nodeplugin,rbd-csi-provisioner,csi-rbdplugin,csi-rbdplugin-provisioner,cephfs-csi-nodeplugin,cephfs-csi-provisioner,csi-cephfsplugin,csi-cephfsplugin-provisioner
+          value: rbd-csi-nodeplugin,rbd-csi-provisioner,csi-rbdplugin,csi-rbdplugin-provisioner,cephfs-csi-nodeplugin,cephfs-csi-provisioner,csi-cephfsplugin,csi-cephfsplugin-provisioner,ceph-csi-operator-rbd-ctrlplugin-sa,ceph-csi-operator-rbd-nodeplugin-sa,ceph-csi-operator-cephfs-ctrlplugin-sa,ceph-csi-operator-cephfs-nodeplugin-sa
         - name: SERVICE_ACCOUNTS_NAMESPACE
           value: default
        - name: VAULT_ADDR