cephfs: add upgrade testing

Upgrade testing enables us to continuously
verify backward compatibility with
earlier releases.

Signed-off-by: Yug <yuggupta27@gmail.com>
Author: Yug
Date: 2020-07-25 22:09:50 +05:30
Committed by: mergify[bot]
parent 9c0d5abb5a
commit 9b30969594
5 changed files with 240 additions and 4 deletions


@@ -23,6 +23,7 @@ var (
 	cephfsDeamonSetName = "csi-cephfsplugin"
 	cephfsDirPath       = "../deploy/cephfs/kubernetes/"
 	cephfsExamplePath   = "../examples/cephfs/"
+	subvolumegroup      = "e2e"
 )
 
 func deployCephfsPlugin() {
@@ -115,7 +116,7 @@ var _ = Describe("cephfs", func() {
 	var c clientset.Interface
 	// deploy cephfs CSI
 	BeforeEach(func() {
-		if !testCephFS {
+		if !testCephFS || upgradeTesting {
 			Skip("Skipping CephFS E2E")
 		}
 		c = f.ClientSet
@@ -133,7 +134,7 @@ var _ = Describe("cephfs", func() {
 	})
 
 	AfterEach(func() {
-		if !testCephFS {
+		if !testCephFS || upgradeTesting {
 			Skip("Skipping CephFS E2E")
 		}
 		if CurrentGinkgoTestDescription().Failed {


@@ -22,6 +22,8 @@ func init() {
 	flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver")
 	flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver")
 	flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
+	flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
+	flag.StringVar(&upgradeVersion, "upgrade-version", "v2.1.2", "target version for upgrade testing")
 	flag.StringVar(&cephCSINamespace, "cephcsi-namespace", defaultNs, "namespace in which cephcsi deployed")
 	flag.StringVar(&rookNamespace, "rook-namespace", "rook-ceph", "namespace in which rook is deployed")
 	setDefaultKubeconfig()
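
With these flags in place, an upgrade run can be started from the repository root. A minimal sketch, assuming the suite is driven through plain `go test` (the timeout value is illustrative, not part of this commit):

    go test ./e2e -timeout=30m -v \
        -test-cephfs=true -test-rbd=false \
        -upgrade-testing=true -upgrade-version=v2.1.2

The upgrade spec below only runs when both -test-cephfs and -upgrade-testing are true, while the regular CephFS spec skips itself during upgrade runs.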

e2e/upgrade-cephfs.go (new file)

@@ -0,0 +1,185 @@
package e2e

import (
	"context"
	"os"

	. "github.com/onsi/ginkgo" // nolint
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/version"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

var _ = Describe("CephFS Upgrade Testing", func() {
f := framework.NewDefaultFramework("upgrade-test-cephfs")
var (
c clientset.Interface
pvc *v1.PersistentVolumeClaim
app *v1.Pod
// cwd stores the initial working directory.
cwd string
)
// deploy cephfs CSI
BeforeEach(func() {
if !upgradeTesting || !testCephFS {
Skip("Skipping CephFS Upgrade Test")
}
c = f.ClientSet
if cephCSINamespace != defaultNs {
err := createNamespace(c, cephCSINamespace)
if err != nil {
Fail(err.Error())
}
}
// fetch current working directory to switch back
// when we are done upgrading.
var err error
cwd, err = os.Getwd()
if err != nil {
Fail(err.Error())
}
err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
if err != nil {
Fail(err.Error())
}
createConfigMap(cephfsDirPath, f.ClientSet, f)
createCephfsSecret(f.ClientSet, f)
createCephfsStorageClass(f.ClientSet, f, true, "")
})
AfterEach(func() {
if !testCephFS || !upgradeTesting {
Skip("Skipping CephFS Upgrade Test")
}
if CurrentGinkgoTestDescription().Failed {
// log pods created by helm chart
logsCSIPods("app=ceph-csi-cephfs", c)
// log provisioner
logsCSIPods("app=csi-cephfsplugin-provisioner", c)
// log node plugin
logsCSIPods("app=csi-cephfsplugin", c)
}
deleteConfigMap(cephfsDirPath)
deleteResource(cephfsExamplePath + "secret.yaml")
deleteResource(cephfsExamplePath + "storageclass.yaml")
if deployCephFS {
deleteCephfsPlugin()
if cephCSINamespace != defaultNs {
err := deleteNamespace(c, cephCSINamespace)
if err != nil {
Fail(err.Error())
}
}
}
})
Context("Cephfs Upgrade Test", func() {
It("Cephfs Upgrade Test", func() {
By("checking provisioner deployment is running")
err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}
By("checking nodeplugin deamonsets is running")
err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}
By("upgrade to latest changes and verify app re-mount", func() {
// TODO: fetch pvc size from spec.
pvcSize := "2Gi"
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
pvc, err = loadPVC(pvcPath)
if pvc == nil {
Fail(err.Error())
}
pvc.Namespace = f.UniqueName
e2elog.Logf("The PVC template %+v", pvc)
app, err = loadApp(appPath)
if err != nil {
Fail(err.Error())
}
app.Namespace = f.UniqueName
app.Labels = map[string]string{"app": "upgrade-testing"}
pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
err = createPVCAndApp("", f, pvc, app, deployTimeout)
if err != nil {
Fail(err.Error())
}
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}
deleteCephfsPlugin()
// switch back to current changes.
err = os.Chdir(cwd)
if err != nil {
Fail(err.Error())
}
deployCephfsPlugin()
app.Labels = map[string]string{"app": "upgrade-testing"}
// validate if the app gets bound to a pvc created by
// an earlier release.
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
Fail(err.Error())
}
})
By("Resize pvc and verify expansion", func() {
var v *version.Info
pvcExpandSize := "5Gi"
v, err = f.ClientSet.Discovery().ServerVersion()
if err != nil {
e2elog.Logf("failed to get server version with error %v", err)
Fail(err.Error())
}
// Resize 0.3.0 is only supported from v1.15+
if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
opt := metav1.ListOptions{
LabelSelector: "app=upgrade-testing",
}
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
Fail(err.Error())
}
// resize PVC
err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
if err != nil {
Fail(err.Error())
}
// wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
Fail(err.Error())
}
// validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize)
if err != nil {
Fail(err.Error())
}
}
})
By("delete pvc and app")
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
Fail(err.Error())
}
})
})
})

e2e/upgrade.go (new file)

@@ -0,0 +1,43 @@
package e2e

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
)

// upgradeCSI deploys a desired ceph-csi release version.
func upgradeCSI(version string) error {
tempDir := "/tmp/ceph-csi"
gitRepo := "https://github.com/ceph/ceph-csi.git"
// clone the desired release branch inside a temporary directory.
cmd := exec.Command("git", "clone", "--single-branch", "--branch", version, gitRepo, tempDir)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
if err := cmd.Run(); err != nil {
return fmt.Errorf("unable to clone repo %s : %w", gitRepo, err)
}
err := os.Chdir(tempDir + "/e2e")
if err != nil {
return fmt.Errorf("unable to switch directory : %w", err)
}
return nil
}
// upgradeAndDeployCSI upgrades the CSI to a specific release.
func upgradeAndDeployCSI(version, testtype string) error {
err := upgradeCSI(version)
if err != nil {
return fmt.Errorf("failed to upgrade driver %w", err)
}
switch testtype {
case "cephfs":
deployCephfsPlugin()
case "rbd":
deployRBDPlugin()
default:
return errors.New("incorrect test type, can be cephfs/rbd")
}
return nil
}
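
For the default target version, the clone step above is equivalent to running:

    git clone --single-branch --branch v2.1.2 https://github.com/ceph/ceph-csi.git /tmp/ceph-csi

followed by a chdir into /tmp/ceph-csi/e2e, so that the deploy helpers resolve the release's manifests through their relative paths (e.g. ../deploy/cephfs/kubernetes/).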


@@ -55,6 +55,8 @@ var (
 	deployRBD        bool
 	testCephFS       bool
 	testRBD          bool
+	upgradeTesting   bool
+	upgradeVersion   string
 	cephCSINamespace string
 	rookNamespace    string
 	ns               string
@@ -353,7 +355,10 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
 		ClusterID: fsID,
 		Monitors:  mons,
 	}}
-	conmap[0].CephFS.SubvolumeGroup = "e2e"
+	if upgradeTesting {
+		subvolumegroup = "csi"
+	}
+	conmap[0].CephFS.SubvolumeGroup = subvolumegroup
 	data, err := json.Marshal(conmap)
 	Expect(err).Should(BeNil())
 	cm.Data["config.json"] = string(data)
@@ -902,7 +907,7 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
 		return err
 	}
-	_, stdErr := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" e2e", rookNamespace)
+	_, stdErr := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace)
 	Expect(stdErr).Should(BeEmpty())
 	if stdErr != "" {