Move rook-deploy code from e2e to ./scripts/minikube.sh
We have the e2e test with --deploy-rook=true that sets up the whole test environment. It works fine, but building the environment does not seem to be the role of an e2e test. In addition, while developing we either have to run the full test scenario, deploying rook every time, or build the rook environment by hand. Move the rook-deploy code to minikube.sh.
commit 2c9d711463 (parent 685e2540a8)
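With rook deployment moved into the scripts, a development loop can set up the cluster once and then iterate on the tests. A minimal sketch of the intended workflow, using only the commands this commit adds or keeps:

```console
$ ./scripts/minikube.sh up
$ ./scripts/minikube.sh deploy-rook
$ go test ./e2e/ -timeout=20m -v      # re-run as needed; rook stays up
$ ./scripts/minikube.sh teardown-rook
```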
@@ -36,6 +36,8 @@ the following parameters are available to configure kubernetes cluster
 | down          | Stops a running local kubernetes cluster                     |
 | clean         | Deletes a local kubernetes cluster                           |
 | ssh           | Log into or run a command on a minikube machine with SSH     |
+| deploy-rook   | Deploy rook to minikube                                      |
+| teardown-rook | Teardown a rook from minikube                                |
 | cephcsi       | Copy built docker images to kubernetes cluster               |
 | k8s-sidecar   | Copy kubernetes sidecar docker images to kubernetes cluster  |

@@ -70,8 +72,6 @@ are available while running tests:

 | flag           | description                                                                   |
 | -------------- | ----------------------------------------------------------------------------- |
-| rook-version   | Rook version to pull yaml files to deploy rook operator (default: v1.1.2)     |
-| deploy-rook    | Deploy rook operator to create ceph cluster (default: true)                   |
 | deploy-timeout | Timeout to wait for created kubernetes resources (default: 10)                |
 | kubeconfig     | Path to kubeconfig containing embedded authinfo (default: $HOME/.kube/config) |
 | timeout        | Panic test binary after duration d (default 0, timeout disabled)              |
@@ -88,11 +88,11 @@ cluster or you can pass the `kubeconfig` flag while running tests.
 Functional tests are run by the `go test` command.

 ```console
-$ go test ./e2e/ --rook-version="v1.0.1" --deploy-rook=true -timeout=20m -v
+$ go test ./e2e/ -timeout=20m -v
 ```

 Functional tests can be invoked by the `make` command:

 ```console
-$ make func-test TESTOPTIONS="--rook-version=v1.0.1 --deploy-rook=true --deploy-timeout=10 -timeout=30m -v"
+$ make func-test TESTOPTIONS="--deploy-timeout=10 -timeout=30m -v"
 ```

@@ -60,7 +60,6 @@ var _ = Describe("cephfs", func() {
 	// deploy cephfs CSI
 	BeforeEach(func() {
 		updateCephfsDirPath(f.ClientSet)
-		createFileSystem(f.ClientSet)
 		createConfigMap(cephfsDirPath, f.ClientSet, f)
 		deployCephfsPlugin()
 		createCephfsSecret(f.ClientSet, f)
@@ -71,7 +70,6 @@ var _ = Describe("cephfs", func() {
 		deleteConfigMap(cephfsDirPath)
 		deleteResource(cephfsExamplePath + "secret.yaml")
 		deleteResource(cephfsExamplePath + "storageclass.yaml")
-		deleteFileSystem()
 	})

 	Context("Test cephfs CSI", func() {

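The suite no longer creates or deletes the CephFS filesystem itself; rook.sh applies filesystem-test.yaml up front instead. A pre-flight sketch for checking that the filesystem and its MDS pods exist before running the cephfs tests (the app=rook-ceph-mds label is the one the removed Go helper below waited on):

```console
$ kubectl -n rook-ceph get cephfilesystem
$ kubectl -n rook-ceph get pod -l app=rook-ceph-mds
```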
@@ -1,189 +0,0 @@
-package e2e
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	. "github.com/onsi/gomega" // nolint
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-)
-
-var (
-	rookURL = "https://raw.githubusercontent.com/rook/rook/$version/cluster/examples/kubernetes/ceph"
-)
-
-var rookNS = "rook-ceph"
-
-func formRookURL(version string) {
-	rookURL = strings.Replace(rookURL, "$version", version, 1)
-}
-
-func getK8sClient() kubernetes.Interface {
-	e2elog.Logf("Creating a kubernetes client")
-	client, err := framework.LoadClientset()
-	Expect(err).Should(BeNil())
-	return client
-}
-
-func deployCommon() {
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "common.yaml")
-	framework.RunKubectlOrDie("create", "-f", commonPath)
-}
-
-func createFileSystem(c kubernetes.Interface) {
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "filesystem-test.yaml")
-	framework.RunKubectlOrDie("create", "-f", commonPath)
-	opt := &metav1.ListOptions{
-		LabelSelector: "app=rook-ceph-mds",
-	}
-	err := checkCephPods(rookNS, c, 1, deployTimeout, opt)
-	Expect(err).Should(BeNil())
-}
-
-func createRBDPool() {
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "pool-test.yaml")
-	framework.RunKubectlOrDie("create", "-f", commonPath)
-}
-
-func deleteFileSystem() {
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "filesystem-test.yaml")
-	_, err := framework.RunKubectl("delete", "-f", commonPath)
-	if err != nil {
-		e2elog.Logf("failed to delete file-system %v", err)
-	}
-}
-
-func deleteRBDPool() {
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "pool-test.yaml")
-	_, err := framework.RunKubectl("delete", "-f", commonPath)
-	if err != nil {
-		e2elog.Logf("failed to delete pool %v", err)
-	}
-}
-
-func deployOperator(c kubernetes.Interface) {
-	opPath := fmt.Sprintf("%s/%s", rookURL, "operator.yaml")
-
-	_, err := framework.RunKubectl("create", "-f", opPath)
-	Expect(err).Should(BeNil())
-	err = waitForDaemonSets("rook-discover", rookNS, c, deployTimeout)
-	Expect(err).Should(BeNil())
-	err = waitForDeploymentComplete("rook-ceph-operator", rookNS, c, deployTimeout)
-	Expect(err).Should(BeNil())
-}
-
-func deployCluster(c kubernetes.Interface) {
-	opPath := fmt.Sprintf("%s/%s", rookURL, "cluster-test.yaml")
-	framework.RunKubectlOrDie("create", "-f", opPath)
-
-	// After rook-ceph v1.1.0 the flex driver is disabled, so we don't need to wait for rook-ceph-agent.
-	if !strings.EqualFold(RookVersion, "master") && isOlderRookVersionThan(RookVersion, "v1.1.1") {
-		err := waitForDaemonSets("rook-ceph-agent", rookNS, c, deployTimeout)
-		Expect(err).Should(BeNil())
-	}
-
-	opt := &metav1.ListOptions{
-		LabelSelector: "app=rook-ceph-mon",
-	}
-	err := checkCephPods(rookNS, c, 1, deployTimeout, opt)
-	Expect(err).Should(BeNil())
-}
-
-func isOlderRookVersionThan(targetVersion, compareToVersion string) bool {
-	rv := extractRookVersion(targetVersion)
-	cv := extractRookVersion(compareToVersion)
-
-	for i := 0; i < 3; i++ {
-		if rv[i] < cv[i] {
-			return true
-		} else if rv[i] > cv[i] {
-			return false
-		}
-	}
-
-	return false
-}
-
-// extract a rook version of the form v1.3.2-beta
-func extractRookVersion(versionString string) []int {
-	reg := regexp.MustCompile(`^v(\d+).(\d+).(\d+)`)
-	parsedVersionString := reg.FindStringSubmatch(versionString)
-	Expect(len(parsedVersionString)).Should(BeNumerically(">=", 4))
-
-	var version []int
-	for i := 1; i < 4; i++ {
-		j, err := strconv.Atoi(parsedVersionString[i])
-		Expect(err).Should(BeNil())
-
-		version = append(version, j)
-	}
-
-	return version
-}
-
-func deployToolBox(c kubernetes.Interface) {
-	opPath := fmt.Sprintf("%s/%s", rookURL, "toolbox.yaml")
-	framework.RunKubectlOrDie("create", "-f", opPath)
-	opt := &metav1.ListOptions{
-		LabelSelector: "app=rook-ceph-tools",
-	}
-
-	name := getPodName(rookNS, c, opt)
-	err := waitForPodInRunningState(name, rookNS, c, deployTimeout)
-	Expect(err).Should(BeNil())
-	waitforToolBoX(name)
-}
-
-// this is a workaround, as we are hitting "unable to get monitor info from DNS SRV with service name: ceph-mon"
-func waitforToolBoX(name string) {
-	cmd := []string{"logs", "-nrook-ceph", name}
-	for i := 0; i < 20; i++ {
-		resp, err := framework.RunKubectl(cmd...)
-		if err != nil {
-			e2elog.Logf("failed to get logs %v", err)
-			continue
-		}
-
-		if !strings.Contains(resp, "=") {
-			e2elog.Logf("malformed monitor configuration %+v", resp)
-			time.Sleep(10 * time.Second)
-			continue
-		}
-
-		if strings.TrimRight(resp[strings.LastIndex(resp, "=")+1:], "\n") != "" {
-			break
-		}
-		e2elog.Logf("monitor list is empty in ceph.conf %v", resp)
-		time.Sleep(10 * time.Second)
-	}
-}
-
-func deployRook() {
-	c := getK8sClient()
-	deployCommon()
-	deployOperator(c)
-	deployCluster(c)
-	deployToolBox(c)
-}
-
-func tearDownRook() {
-	opPath := fmt.Sprintf("%s/%s", rookURL, "cluster-test.yaml")
-	framework.Cleanup(opPath, rookNS, "app=rook-ceph-mon")
-
-	opPath = fmt.Sprintf("%s/%s", rookURL, "toolbox.yaml")
-	framework.Cleanup(opPath, rookNS, "app=rook-ceph-tools")
-
-	opPath = fmt.Sprintf("%s/%s", rookURL, "operator.yaml")
-	// TODO need to add selector for cleanup validation
-	framework.Cleanup(opPath, rookNS)
-	commonPath := fmt.Sprintf("%s/%s", rookURL, "common.yaml")
-	_, err := framework.RunKubectl("delete", "-f", commonPath)
-	if err != nil {
-		e2elog.Logf("failed to delete rook common %v", err)
-	}
-}

@@ -11,20 +11,15 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 var (
-	RookVersion string
-	rookRequired bool
 	deployTimeout int
 )

 func init() {
 	log.SetOutput(GinkgoWriter)
-	flag.StringVar(&RookVersion, "rook-version", "v1.1.2", "rook version to pull yaml files")

-	flag.BoolVar(&rookRequired, "deploy-rook", true, "deploy rook on kubernetes")
 	flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources")

 	setDefaultKubeconfig()
@@ -33,7 +28,6 @@ func init() {
 	framework.HandleFlags()
 	framework.AfterReadingAllFlags(&framework.TestContext)

-	formRookURL(RookVersion)
 	fmt.Println("timeout for deploytimeout ", deployTimeout)
 }

@@ -45,74 +39,12 @@ func setDefaultKubeconfig() {
 	}
 }

-// removeCephCSIResource is a temporary fix for CI to remove the ceph-csi resources deployed by rook
-func removeCephCSIResource() {
-	// cleanup the rbd and cephfs daemonsets deployed by rook
-	_, err := framework.RunKubectl("delete", "-nrook-ceph", "daemonset", "csi-cephfsplugin")
-	if err != nil {
-		e2elog.Logf("failed to delete cephfs daemonset %v", err)
-	}
-	_, err = framework.RunKubectl("delete", "-nrook-ceph", "daemonset", "csi-rbdplugin")
-	if err != nil {
-		e2elog.Logf("failed to delete rbd daemonset %v", err)
-	}
-
-	// if kube version is <1.14.0 rook deploys the cephfs and rbd provisioners as statefulsets
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-nrook-ceph", "statefulset", "csi-rbdplugin-provisioner")
-	if err != nil {
-		e2elog.Logf("failed to delete rbd statefulset %v", err)
-	}
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-nrook-ceph", "statefulset", "csi-cephfsplugin-provisioner")
-	if err != nil {
-		e2elog.Logf("failed to delete cephfs statefulset %v", err)
-	}
-
-	// if kube version is >=1.14.0 rook deploys the cephfs and rbd provisioners as deployments
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-nrook-ceph", "deployment", "csi-rbdplugin-provisioner")
-	if err != nil {
-		e2elog.Logf("failed to delete rbd deployment %v", err)
-	}
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-nrook-ceph", "deployment", "csi-cephfsplugin-provisioner")
-	if err != nil {
-		e2elog.Logf("failed to delete cephfs deployment %v", err)
-	}
-
-	// cleanup rbd cluster roles deployed by rook
-	rbdPath := fmt.Sprintf("%s/%s/", rbdDirPath, "v1.13")
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-f", rbdPath+rbdProvisionerRBAC)
-	if err != nil {
-		e2elog.Logf("failed to delete provisioner rbac %v", err)
-	}
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-f", rbdPath+rbdNodePluginRBAC)
-	if err != nil {
-		e2elog.Logf("failed to delete nodeplugin rbac %v", err)
-	}
-
-	// cleanup cephfs cluster roles deployed by rook
-	cephfsPath := fmt.Sprintf("%s/%s/", cephfsDirPath, "v1.13")
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-f", cephfsPath+cephfsProvisionerRBAC)
-	if err != nil {
-		e2elog.Logf("failed to delete provisioner rbac %v", err)
-	}
-	_, err = framework.RunKubectl("delete", "--ignore-not-found", "-f", cephfsPath+cephfsNodePluginRBAC)
-	if err != nil {
-		e2elog.Logf("failed to delete nodeplugin rbac %v", err)
-	}
-}
-
-// BeforeSuite deploys the rook-operator and ceph cluster
 var _ = BeforeSuite(func() {
-	if rookRequired {
-		deployRook()
-		removeCephCSIResource()
-	}
 })

-// AfterSuite removes the rook-operator and ceph cluster
 var _ = AfterSuite(func() {
-	if rookRequired {
-		tearDownRook()
-	}
 })

 func TestE2E(t *testing.T) {

@@ -62,7 +62,6 @@ var _ = Describe("RBD", func() {
 	// deploy RBD CSI
 	BeforeEach(func() {
 		updaterbdDirPath(f.ClientSet)
-		createRBDPool()
 		createConfigMap(rbdDirPath, f.ClientSet, f)
 		deployRBDPlugin()
 		createRBDStorageClass(f.ClientSet, f, make(map[string]string))
@@ -73,7 +72,6 @@ var _ = Describe("RBD", func() {
 	AfterEach(func() {
 		deleteRBDPlugin()
 		deleteConfigMap(rbdDirPath)
-		deleteRBDPool()
 		deleteResource(rbdExamplePath + "secret.yaml")
 		deleteResource(rbdExamplePath + "storageclass.yaml")
 		// deleteResource(rbdExamplePath + "snapshotclass.yaml")

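Likewise, the RBD pool is now created by rook.sh from pool-test.yaml rather than by the suite. A quick sketch for verifying it exists before the RBD tests run (assuming rook's usual CephBlockPool resource name):

```console
$ kubectl -n rook-ceph get cephblockpool
```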
e2e/utils.go (41 changed lines)
@@ -30,6 +30,10 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 )

+const (
+	rookNS = "rook-ceph"
+)
+
 var poll = 2 * time.Second

 // type snapInfo struct {
@@ -460,22 +464,6 @@ func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
 	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout)
 }

-func getPodName(ns string, c kubernetes.Interface, opt *metav1.ListOptions) string {
-	ticker := time.NewTicker(1 * time.Second)
-	// TODO add stop logic
-	for range ticker.C {
-		podList, err := c.CoreV1().Pods(ns).List(*opt)
-		framework.ExpectNoError(err)
-		Expect(podList.Items).NotTo(BeNil())
-		Expect(err).Should(BeNil())
-
-		if len(podList.Items) != 0 {
-			return podList.Items[0].Name
-		}
-	}
-	return ""
-}
-
 func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
 	timeout := time.Duration(t) * time.Minute
 	start := time.Now()
@@ -532,27 +520,6 @@ func unmarshal(fileName string, obj interface{}) error {
 	return err
 }

-func checkCephPods(ns string, c kubernetes.Interface, count, t int, opt *metav1.ListOptions) error {
-	timeout := time.Duration(t) * time.Minute
-	start := time.Now()
-
-	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		podList, err := c.CoreV1().Pods(ns).List(*opt)
-		if err != nil {
-			return false, err
-		}
-
-		e2elog.Logf("pod count is %d expected count %d (%d seconds elapsed)", len(podList.Items), count, int(time.Since(start).Seconds()))
-
-		if len(podList.Items) >= count {
-			return true, nil
-		}
-
-		return false, nil
-	})
-
-}
-
 // createPVCAndApp creates pvc and pod
 // if name is not empty same will be set as pvc and app name
 func createPVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error {

scripts/minikube.sh

@@ -61,6 +61,11 @@ VM_DRIVER=${VM_DRIVER:-"virtualbox"}
 #configure image repo
 CEPHCSI_IMAGE_REPO=${CEPHCSI_IMAGE_REPO:-"quay.io/cephcsi"}
 K8S_IMAGE_REPO=${K8S_IMAGE_REPO:-"quay.io/k8scsi"}
+DISK="sda1"
+if [[ "${VM_DRIVER}" == "kvm2" ]]; then
+    # use vda1 instead of sda1 when running with the libvirt driver
+    DISK="vda1"
+fi

 #feature-gates for kube
 K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"BlockVolume=true,CSIBlockVolume=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true"}
@@ -77,11 +82,6 @@ up)
     echo "starting minikube with kubeadm bootstrapper"
     minikube start --memory="${MEMORY}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --vm-driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}"

-    DISK="sda1"
-    if [[ "${VM_DRIVER}" == "kvm2" ]]; then
-        # use vda1 instead of sda1 when running with the libvirt driver
-        DISK="vda1"
-    fi
     # create a link so the default dataDirHostPath will work for this
     # environment
     if [[ "${VM_DRIVER}" != "none" ]]; then
@@ -99,6 +99,18 @@ ssh)
     echo "connecting to minikube"
     minikube ssh
     ;;
+deploy-rook)
+    echo "deploy rook"
+    ./scripts/rook.sh deploy
+    ;;
+teardown-rook)
+    echo "teardown rook"
+    ./scripts/rook.sh teardown
+
+    # delete rook data for minikube
+    minikube ssh "sudo rm -rf /mnt/${DISK}/var/lib/rook; sudo rm -rf /var/lib/rook"
+    minikube ssh "sudo mkdir -p /mnt/${DISK}/var/lib/rook; sudo ln -s /mnt/${DISK}/var/lib/rook /var/lib/rook"
+    ;;
 cephcsi)
     echo "copying the cephcsi image"
     copy_image_to_cluster "${CEPHCSI_IMAGE_REPO}"/cephcsi:canary "${CEPHCSI_IMAGE_REPO}"/cephcsi:canary
@@ -120,6 +132,8 @@ Available Commands:
   down           Stops a running local kubernetes cluster
   clean          Deletes a local kubernetes cluster
   ssh            Log into or run a command on a minikube machine with SSH
+  deploy-rook    Deploy rook to minikube
+  teardown-rook  Teardown a rook from minikube
   cephcsi        copy built docker images to kubernetes cluster
   k8s-sidecar    copy kubernetes sidecar docker images to kubernetes cluster
 " >&2
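Both new commands delegate to scripts/rook.sh (shown next), which reads ROOK_VERSION and ROOK_DEPLOY_TIMEOUT from the environment, so they can be overridden when invoking minikube.sh directly. A sketch:

```console
$ ROOK_VERSION=v1.1.7 ROOK_DEPLOY_TIMEOUT=600 ./scripts/minikube.sh deploy-rook
$ ./scripts/minikube.sh teardown-rook   # also wipes the rook data under /var/lib/rook in the VM
```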
scripts/rook.sh (new executable file, 48 lines)

@@ -0,0 +1,48 @@
#!/bin/bash -e
|
||||||
|
|
||||||
|
ROOK_VERSION=${ROOK_VERSION:-"v1.1.7"}
|
||||||
|
ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300}
|
||||||
|
ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph"
|
||||||
|
|
||||||
|
function deploy_rook() {
|
||||||
|
kubectl create -f "${ROOK_URL}/common.yaml"
|
||||||
|
kubectl create -f "${ROOK_URL}/operator.yaml"
|
||||||
|
kubectl create -f "${ROOK_URL}/cluster-test.yaml"
|
||||||
|
kubectl create -f "${ROOK_URL}/toolbox.yaml"
|
||||||
|
kubectl create -f "${ROOK_URL}/filesystem-test.yaml"
|
||||||
|
kubectl create -f "${ROOK_URL}/pool-test.yaml"
|
||||||
|
|
||||||
|
for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
|
||||||
|
echo "Wait for rook deploy... ${retry}s"
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
if kubectl get cephclusters -n rook-ceph | grep HEALTH_OK &> /dev/null; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function teardown_rook() {
|
||||||
|
kubectl delete -f "${ROOK_URL}/pool-test.yaml"
|
||||||
|
kubectl delete -f "${ROOK_URL}/filesystem-test.yaml"
|
||||||
|
kubectl delete -f "${ROOK_URL}/toolbox.yaml"
|
||||||
|
kubectl delete -f "${ROOK_URL}/cluster-test.yaml"
|
||||||
|
kubectl delete -f "${ROOK_URL}/operator.yaml"
|
||||||
|
kubectl delete -f "${ROOK_URL}/common.yaml"
|
||||||
|
}
|
||||||
|
|
||||||
|
case "${1:-}" in
|
||||||
|
deploy)
|
||||||
|
deploy_rook
|
||||||
|
;;
|
||||||
|
teardown)
|
||||||
|
teardown_rook
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo " $0 [command]
|
||||||
|
Available Commands:
|
||||||
|
deploy Deploy a rook
|
||||||
|
teardown Teardown a rook
|
||||||
|
" >&2
|
||||||
|
;;
|
||||||
|
esac
|
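The deploy loop polls `kubectl get cephclusters` until the cluster reports HEALTH_OK. Once the toolbox from toolbox.yaml is running, the same health can be inspected by hand; a sketch, assuming the app=rook-ceph-tools label that toolbox.yaml applies:

```console
$ kubectl -n rook-ceph get cephclusters
$ kubectl -n rook-ceph exec -it \
    "$(kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')" -- ceph status
```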
@@ -5,12 +5,13 @@ set -e
 # against different kubernetes version
 export KUBE_VERSION=$1
 sudo scripts/minikube.sh up
+sudo scripts/minikube.sh deploy-rook
 # pull docker images to speed up e2e
 sudo scripts/minikube.sh cephcsi
 sudo scripts/minikube.sh k8s-sidecar
 sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl
 # functional tests

-go test github.com/ceph/ceph-csi/e2e --rook-version=v1.1.0 --deploy-rook=true --deploy-timeout=10 -timeout=30m -v
+go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m -v

 sudo scripts/minikube.sh clean