Fix mon endpoint issue in E2E

In the toolbox the mon endpoints are not
updated properly, which causes failures in the E2E tests.
This PR adds a workaround for that issue.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna 2019-09-04 09:55:40 +05:30 committed by mergify[bot]
parent 677be13b11
commit 64ca401a51
4 changed files with 26 additions and 17 deletions
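
For context on the check introduced below: the workaround polls the toolbox pod's logs until the monitor list printed there is non-empty, i.e. until the text after the last "=" (per the commit's own log messages, the mon list written into ceph.conf) no longer trims down to an empty string. Until the Rook operator has filled in the mon endpoints, ceph commands in the toolbox fail with the "unable to get monitor info from DNS SRV with service name: ceph-mon" error quoted in the diff. A minimal standalone sketch of that string check, using a hypothetical monListPopulated helper and made-up example output (not part of this commit):

package main

import (
	"fmt"
	"strings"
)

// monListPopulated mirrors the check in the new waitforToolBoX helper: take
// the text after the last "=" in the toolbox output, strip the trailing
// newline, and treat an empty remainder as "mon endpoints not populated yet".
func monListPopulated(out string) bool {
	if !strings.Contains(out, "=") {
		return false // no key/value lines at all, configuration not ready
	}
	return strings.TrimRight(out[strings.LastIndex(out, "=")+1:], "\n") != ""
}

func main() {
	// Hypothetical toolbox output before the operator fills in the mons.
	fmt.Println(monListPopulated("[global]\nmon_host =\n")) // false
	// Hypothetical output once the mon endpoints are present.
	fmt.Println(monListPopulated("[global]\nmon_host = 10.96.12.34:6789\n")) // true
}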

View File

@@ -61,7 +61,6 @@ var _ = Describe("cephfs", func() {
 	BeforeEach(func() {
 		updateCephfsDirPath(f.ClientSet)
 		createFileSystem(f.ClientSet)
-		waitTillMonsAreUp(f)
 		createConfigMap(cephfsDirPath, f.ClientSet, f)
 		deployCephfsPlugin()
 		createCephfsSecret(f.ClientSet, f)

View File

@@ -3,6 +3,7 @@ package e2e
 import (
 	"fmt"
 	"strings"
+	"time"
 
 	. "github.com/onsi/gomega" // nolint
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -97,6 +98,31 @@ func deployToolBox(c kubernetes.Interface) {
 	name := getPodName(rookNS, c, opt)
 	err := waitForPodInRunningState(name, rookNS, c, deployTimeout)
 	Expect(err).Should(BeNil())
+	waitforToolBoX(name)
+}
+
+// this is a workaround, as we are hitting "unable to get monitor info from DNS SRV with service name: ceph-mon"
+func waitforToolBoX(name string) {
+	cmd := []string{"logs", "-nrook-ceph", name}
+	for i := 0; i < 20; i++ {
+		resp, err := framework.RunKubectl(cmd...)
+		if err != nil {
+			e2elog.Logf("failed to get logs %v", err)
+			continue
+		}
+
+		if !strings.Contains(resp, "=") {
+			e2elog.Logf("malformed monitor configuration %+v", resp)
+			time.Sleep(10 * time.Second)
+			continue
+		}
+
+		if strings.TrimRight(resp[strings.LastIndex(resp, "=")+1:], "\n") != "" {
+			break
+		}
+		e2elog.Logf("monitor list is empty in ceph.conf %v", resp)
+		time.Sleep(10 * time.Second)
+	}
 }
 
 func deployRook() {

View File

@@ -63,7 +63,6 @@ var _ = Describe("RBD", func() {
 	BeforeEach(func() {
 		updaterbdDirPath(f.ClientSet)
 		createRBDPool()
-		waitTillMonsAreUp(f)
 		createConfigMap(rbdDirPath, f.ClientSet, f)
 		deployRBDPlugin()
 		createRBDStorageClass(f.ClientSet, f)

View File

@@ -193,21 +193,6 @@ func getStorageClass(path string) scv1.StorageClass {
 // 	return sc
 // }
 
-// this is a workaround, as we are hitting "unable to get monitor info from DNS SRV with service name: ceph-mon"
-func waitTillMonsAreUp(f *framework.Framework) {
-	opt := metav1.ListOptions{
-		LabelSelector: "app=rook-ceph-tools",
-	}
-	for i := 0; i < 10; i++ {
-		_, err := execCommandInPod(f, "ceph fsid", rookNS, &opt)
-		if err != "" {
-			time.Sleep(10 * time.Second)
-			continue
-		}
-		break
-	}
-}
-
 func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool) {
 	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
 	sc := getStorageClass(scPath)