mirror of https://github.com/ceph/ceph-csi.git, synced 2024-12-18 11:00:25 +00:00
Add support for erasure coded pools
This commit adds support for specifying a dataPool parameter for the topology constrained pools in the StorageClass, which can be leveraged to specify erasure coded pool names to use for RBD data instead of the replica pools.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
This commit is contained in:
parent 3f06fedf61
commit 1a8f8e3c24
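The topologyConstrainedPools StorageClass parameter is a JSON-encoded list of pools, and this commit adds an optional dataPool key to each entry. As a minimal, self-contained sketch (not part of the commit; pool and domain names are placeholders), the parameter can be decoded with structs modeled on the TopologyConstrainedPool type touched later in this diff:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the types touched in this diff (TopologyConstrainedPool and
// topologySegment); field names here are illustrative, only the json tags are
// taken from the commit.
type topologySegment struct {
	DomainLabel string `json:"domainLabel"`
	DomainValue string `json:"value"`
}

type topologyConstrainedPool struct {
	PoolName       string            `json:"poolName"`
	DataPoolName   string            `json:"dataPool"` // new in this commit: erasure coded pool for data
	DomainSegments []topologySegment `json:"domainSegments"`
}

func main() {
	// Hypothetical StorageClass parameter value; pool and zone names are placeholders.
	param := `[{"poolName":"pool0","dataPool":"ec-pool0",
	            "domainSegments":[{"domainLabel":"region","value":"east"},
	                              {"domainLabel":"zone","value":"zone1"}]}]`

	var pools []topologyConstrainedPool
	if err := json.Unmarshal([]byte(param), &pools); err != nil {
		panic(err)
	}
	fmt.Printf("image pool: %s, data pool: %s\n", pools[0].PoolName, pools[0].DataPoolName)
}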
@@ -46,7 +46,7 @@ the following parameters are available to configure kubernetes cluster
 following environment variables can be exported to customize kubernetes deployment

 | ENV | Description | Default |
-| ------------------ | ------------------------------------------------ | ------------------------------------------------------------------ |
+|----------------------|--------------------------------------------------|--------------------------------------------------------------------|
 | MINIKUBE_VERSION | minikube version to install | latest |
 | KUBE_VERSION | kubernetes version to install | v1.14.10 |
 | MEMORY | Amount of RAM allocated to the minikube VM in MB | 3000 |
e2e/rbd.go (36 changed lines)
@@ -30,6 +30,7 @@ var (
 nodeCSIRegionLabel = "topology.rbd.csi.ceph.com/region"
 nodeCSIZoneLabel = "topology.rbd.csi.ceph.com/zone"
 rbdTopologyPool = "newrbdpool"
+rbdTopologyDataPool = "replicapool" // NOTE: should be different than rbdTopologyPool for test to be effective
 )

 func deployRBDPlugin() {
@@ -125,9 +126,9 @@ var _ = Describe("RBD", func() {
 // deploy RBD CSI
 BeforeEach(func() {
 c = f.ClientSet
+if deployRBD {
 createNodeLabel(f, nodeRegionLabel, regionValue)
 createNodeLabel(f, nodeZoneLabel, zoneValue)
-if deployRBD {
 if cephCSINamespace != defaultNs {
 err := createNamespace(c, cephCSINamespace)
 if err != nil {
@@ -519,11 +520,44 @@ var _ = Describe("RBD", func() {
 Fail(err.Error())
 }

+err = deletePVCAndApp("", f, pvc, app)
+if err != nil {
+Fail(err.Error())
+}
+
+By("checking if data pool parameter is honored", func() {
+deleteResource(rbdExamplePath + "storageclass.yaml")
+topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool +
+"\",\"domainSegments\":" +
+"[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," +
+"{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]"
+createRBDStorageClass(f.ClientSet, f,
+map[string]string{"volumeBindingMode": "WaitForFirstConsumer"},
+map[string]string{"topologyConstrainedPools": topologyConstraint})
+
+By("creating an app using a PV from the delayed binding mode StorageClass with a data pool")
+pvc, app = createPVCAndAppBinding(pvcPath, appPath, f, 0)
+
+By("ensuring created PV has its image in the topology specific pool")
+err = checkPVCImageInPool(f, pvc, rbdTopologyPool)
+if err != nil {
+Fail(err.Error())
+}
+
+By("ensuring created image has the right data pool parameter set")
+err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool)
+if err != nil {
+Fail(err.Error())
+}
+
 // cleanup and undo changes made by the test
 err = deletePVCAndApp("", f, pvc, app)
 if err != nil {
 Fail(err.Error())
 }
+})
+
+// cleanup and undo changes made by the test
 deleteResource(rbdExamplePath + "storageclass.yaml")
 createRBDStorageClass(f.ClientSet, f, nil, nil)
 })
e2e/utils.go (25 changed lines)
@@ -1234,21 +1234,40 @@ func checkNodeHasLabel(c clientset.Interface, labelKey, labelValue string) {
 }
 }

-func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
 imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
 if err != nil {
-return err
+return "", err
 }

 opt := metav1.ListOptions{
 LabelSelector: "app=rook-ceph-tools",
 }

-_, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
+stdOut, stdErr := execCommandInPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace, &opt)
 Expect(stdErr).Should(BeEmpty())

 e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)

+return stdOut, nil
+}
+
+func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+_, err := getPVCImageInfoInPool(f, pvc, pool)
+
+return err
+}
+
+func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
+stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
+if err != nil {
+return err
+}
+
+if !strings.Contains(stdOut, "data_pool: "+dataPool) {
+return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
+}
+
 return nil
 }
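The new checkPVCDataPoolForImageInPool helper relies on the plain-text output of rbd info containing a data_pool: line when a separate data pool is set. A small standalone sketch of that check; the sample output below is abbreviated and illustrative, not captured from a real cluster:

package main

import (
	"fmt"
	"strings"
)

// hasDataPool mirrors the substring check added in e2e/utils.go: the image is
// treated as using the given data pool if "rbd info" reports a matching
// "data_pool:" line.
func hasDataPool(rbdInfoOutput, dataPool string) bool {
	return strings.Contains(rbdInfoOutput, "data_pool: "+dataPool)
}

func main() {
	// Hypothetical, abbreviated "rbd info" output for an image whose data
	// objects live in a separate (typically erasure coded) pool.
	sample := `rbd image 'csi-vol-0001':
	size 1 GiB in 256 objects
	data_pool: replicapool
	block_name_prefix: rbd_data.2.abcdef`

	fmt.Println(hasDataPool(sample, "replicapool")) // true
	fmt.Println(hasDataPool(sample, "otherpool"))   // false
}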
@@ -58,14 +58,17 @@ parameters:
 # For further information read TODO<doc>
 # topologyConstrainedPools: |
 # [{"poolName":"pool0",
+# "dataPool":"ec-pool0" # optional, erasure-coded pool for data
 # "domainSegments":[
 # {"domainLabel":"region","value":"east"},
 # {"domainLabel":"zone","value":"zone1"}]},
 # {"poolName":"pool1",
+# "dataPool":"ec-pool1" # optional, erasure-coded pool for data
 # "domainSegments":[
 # {"domainLabel":"region","value":"east"},
 # {"domainLabel":"zone","value":"zone2"}]},
 # {"poolName":"pool2",
+# "dataPool":"ec-pool2" # optional, erasure-coded pool for data
 # "domainSegments":[
 # {"domainLabel":"region","value":"west"},
 # {"domainLabel":"zone","value":"zone1"}]}
@@ -110,7 +110,7 @@ func undoVolReservation(ctx context.Context, volOptions *volumeOptions, vid volu

 func updateTopologyConstraints(volOpts *volumeOptions) error {
 // update request based on topology constrained parameters (if present)
-poolName, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
+poolName, _, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
 if err != nil {
 return err
 }
@@ -282,12 +282,13 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
 return nil
 }
 // update request based on topology constrained parameters (if present)
-poolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
+poolName, dataPoolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
 if err != nil {
 return err
 }
 if poolName != "" {
 rbdVol.Pool = poolName
+rbdVol.DataPool = dataPoolName
 rbdVol.Topology = topology
 }

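The hunk above only records the selected data pool on rbdVol; the creation path that consumes rbdVol.DataPool is outside this diff. As a hedged illustration of the underlying mechanism, RBD separates data from metadata via the rbd CLI's --data-pool option (or the equivalent librbd image option), roughly as in this sketch; the helper and names below are hypothetical, not ceph-csi code:

package main

import (
	"fmt"
	"strings"
)

// buildCreateArgs sketches how an image-create command line could carry the
// selected data pool. Illustrative only; this is not the ceph-csi creation path.
func buildCreateArgs(pool, dataPool, image string, sizeMiB int) []string {
	args := []string{"create", "--size", fmt.Sprintf("%d", sizeMiB), "--pool", pool}
	if dataPool != "" {
		// --data-pool places the image's data objects in a separate
		// (typically erasure coded) pool, while metadata stays in --pool.
		args = append(args, "--data-pool", dataPool)
	}
	return append(args, image)
}

func main() {
	fmt.Println("rbd " + strings.Join(buildCreateArgs("newrbdpool", "replicapool", "csi-vol-0001", 1024), " "))
}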
@@ -125,6 +125,7 @@ type topologySegment struct {
 // TopologyConstrainedPool stores the pool name and a list of its associated topology domain values
 type TopologyConstrainedPool struct {
 PoolName string `json:"poolName"`
+DataPoolName string `json:"dataPool"`
 DomainSegments []topologySegment `json:"domainSegments"`
 }

@@ -178,7 +179,7 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
 topologyPools, poolName)
 }

-_, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements)
+_, _, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements)

 return topology, err
 }
@@ -188,28 +189,28 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
 // The return variables are, image poolname, data poolname, and topology map of
 // matched requirement
 func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
-accessibilityRequirements *csi.TopologyRequirement) (string, map[string]string, error) {
+accessibilityRequirements *csi.TopologyRequirement) (string, string, map[string]string, error) {
 if topologyPools == nil || accessibilityRequirements == nil {
-return "", nil, nil
+return "", "", nil, nil
 }

 // select pool that fits first topology constraint preferred requirements
 for _, topology := range accessibilityRequirements.GetPreferred() {
-poolName := matchPoolToTopology(topologyPools, topology)
-if poolName != "" {
-return poolName, topology.GetSegments(), nil
+topologyPool := matchPoolToTopology(topologyPools, topology)
+if topologyPool.PoolName != "" {
+return topologyPool.PoolName, topologyPool.DataPoolName, topology.GetSegments(), nil
 }
 }

 // If preferred mismatches, check requisite for a fit
 for _, topology := range accessibilityRequirements.GetRequisite() {
-poolName := matchPoolToTopology(topologyPools, topology)
-if poolName != "" {
-return poolName, topology.GetSegments(), nil
+topologyPool := matchPoolToTopology(topologyPools, topology)
+if topologyPool.PoolName != "" {
+return topologyPool.PoolName, topologyPool.DataPoolName, topology.GetSegments(), nil
 }
 }

-return "", nil, fmt.Errorf("none of the topology constrained pools matched requested "+
+return "", "", nil, fmt.Errorf("none of the topology constrained pools matched requested "+
 "topology constraints : pools (%+v) requested topology (%+v)",
 *topologyPools, *accessibilityRequirements)
 }
@@ -217,7 +218,7 @@ func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
 // matchPoolToTopology loops through passed in pools, and for each pool checks if all
 // requested topology segments are present and match the request, returning the first pool
 // that hence matches (or an empty string if none match)
-func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi.Topology) string {
+func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi.Topology) TopologyConstrainedPool {
 domainMap := extractDomainsFromlabels(topology)

 // check if any pool matches all the domain keys and values
@@ -235,10 +236,10 @@ func matchPoolToTopology(topologyPools *[]TopologyConstrainedPool, topology *csi
 continue
 }

-return topologyPool.PoolName
+return topologyPool
 }

-return ""
+return TopologyConstrainedPool{}
 }

 // extractDomainsFromlabels returns the domain name map, from passed in domain segments,
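Taken together, the two hunks above change FindPoolAndTopology to hand back the whole matched TopologyConstrainedPool's information: preferred topologies are tried first, then requisite ones, and the caller now receives both the image pool and its (possibly empty) data pool. A simplified, self-contained sketch of that selection, using plain maps in place of the CSI topology types (all names here are illustrative):

package main

import "fmt"

type segment struct{ DomainLabel, DomainValue string }

type constrainedPool struct {
	PoolName       string
	DataPoolName   string
	DomainSegments []segment
}

// findPool mirrors the shape of util.FindPoolAndTopology after this commit: it
// returns the matched image pool, its data pool, and the matching topology.
// Requested topologies are tried in order (preferred before requisite in the
// real code); a pool matches when every domain segment it declares agrees with
// the requested topology, so a pool with fewer segments acts as a superset.
func findPool(pools []constrainedPool, requested []map[string]string) (string, string, map[string]string, error) {
	for _, topo := range requested {
		for _, p := range pools {
			matched := p.PoolName != ""
			for _, seg := range p.DomainSegments {
				if topo[seg.DomainLabel] != seg.DomainValue {
					matched = false
					break
				}
			}
			if matched {
				return p.PoolName, p.DataPoolName, topo, nil
			}
		}
	}
	return "", "", nil, fmt.Errorf("no topology constrained pool matched the request")
}

func main() {
	pools := []constrainedPool{
		{PoolName: "pool0", DataPoolName: "ec-pool0",
			DomainSegments: []segment{{"region", "east"}, {"zone", "zone1"}}},
	}
	fmt.Println(findPool(pools, []map[string]string{{"region": "east", "zone": "zone1"}}))
}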
@@ -224,91 +224,91 @@ func TestFindPoolAndTopology(t *testing.T) {
 return nil
 }
 // Test nil values
-_, _, err = FindPoolAndTopology(nil, nil)
+_, _, _, err = FindPoolAndTopology(nil, nil)
 if err != nil {
 t.Errorf("expected success due to nil in-args (%v)", err)
 }

-poolName, _, err := FindPoolAndTopology(&validMultipleTopoPools, nil)
+poolName, _, _, err := FindPoolAndTopology(&validMultipleTopoPools, nil)
 if err != nil || poolName != "" {
 t.Errorf("expected success due to nil accessibility requirements (err - %v) (poolName - %s)", err, poolName)
 }

-poolName, _, err = FindPoolAndTopology(nil, &validAccReq)
+poolName, _, _, err = FindPoolAndTopology(nil, &validAccReq)
 if err != nil || poolName != "" {
 t.Errorf("expected success due to nil topology pools (err - %v) (poolName - %s)", err, poolName)
 }

 // Test valid accessibility requirement, with invalid topology pools values
-_, _, err = FindPoolAndTopology(&emptyTopoPools, &validAccReq)
+_, _, _, err = FindPoolAndTopology(&emptyTopoPools, &validAccReq)
 if err == nil {
 t.Errorf("expected failure due to empty topology pools")
 }

-_, _, err = FindPoolAndTopology(&emptyPoolNameTopoPools, &validAccReq)
+_, _, _, err = FindPoolAndTopology(&emptyPoolNameTopoPools, &validAccReq)
 if err == nil {
 t.Errorf("expected failure due to missing pool name in topology pools")
 }

-_, _, err = FindPoolAndTopology(&differentDomainsInTopoPools, &validAccReq)
+_, _, _, err = FindPoolAndTopology(&differentDomainsInTopoPools, &validAccReq)
 if err == nil {
 t.Errorf("expected failure due to mismatching domains in topology pools")
 }

 // Test valid topology pools, with invalid accessibility requirements
-_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &emptyAccReq)
+_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &emptyAccReq)
 if err == nil {
 t.Errorf("expected failure due to empty accessibility requirements")
 }

-_, _, err = FindPoolAndTopology(&validSingletonTopoPools, &emptySegmentAccReq)
+_, _, _, err = FindPoolAndTopology(&validSingletonTopoPools, &emptySegmentAccReq)
 if err == nil {
 t.Errorf("expected failure due to empty segments in accessibility requirements")
 }

-_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialHigherSegmentAccReq)
+_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialHigherSegmentAccReq)
 if err == nil {
 t.Errorf("expected failure due to partial segments in accessibility requirements")
 }

-_, _, err = FindPoolAndTopology(&validSingletonTopoPools, &partialLowerSegmentAccReq)
+_, _, _, err = FindPoolAndTopology(&validSingletonTopoPools, &partialLowerSegmentAccReq)
 if err == nil {
 t.Errorf("expected failure due to partial segments in accessibility requirements")
 }

-_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialLowerSegmentAccReq)
+_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &partialLowerSegmentAccReq)
 if err == nil {
 t.Errorf("expected failure due to partial segments in accessibility requirements")
 }

-_, _, err = FindPoolAndTopology(&validMultipleTopoPools, &differentSegmentAccReq)
+_, _, _, err = FindPoolAndTopology(&validMultipleTopoPools, &differentSegmentAccReq)
 if err == nil {
 t.Errorf("expected failure due to mismatching segments in accessibility requirements")
 }

 // Test success cases
 // If a pool is a superset of domains (either empty domain labels or partial), it can be selected
-poolName, topoSegment, err := FindPoolAndTopology(&emptyDomainsInTopoPools, &validAccReq)
+poolName, _, topoSegment, err := FindPoolAndTopology(&emptyDomainsInTopoPools, &validAccReq)
 err = checkOutput(err, poolName, topoSegment)
 if err != nil {
 t.Errorf("expected success got: (%v)", err)
 }

-poolName, topoSegment, err = FindPoolAndTopology(&partialDomainsInTopoPools, &validAccReq)
+poolName, _, topoSegment, err = FindPoolAndTopology(&partialDomainsInTopoPools, &validAccReq)
 err = checkOutput(err, poolName, topoSegment)
 if err != nil {
 t.Errorf("expected success got: (%v)", err)
 }

 // match in a singleton topology pools
-poolName, topoSegment, err = FindPoolAndTopology(&validSingletonTopoPools, &validAccReq)
+poolName, _, topoSegment, err = FindPoolAndTopology(&validSingletonTopoPools, &validAccReq)
 err = checkOutput(err, poolName, topoSegment)
 if err != nil {
 t.Errorf("expected success got: (%v)", err)
 }

 // match first in multiple topology pools
-poolName, topoSegment, err = FindPoolAndTopology(&validMultipleTopoPools, &validAccReq)
+poolName, _, topoSegment, err = FindPoolAndTopology(&validMultipleTopoPools, &validAccReq)
 err = checkOutput(err, poolName, topoSegment)
 if err != nil {
 t.Errorf("expected success got: (%v)", err)
@@ -317,12 +317,25 @@ func TestFindPoolAndTopology(t *testing.T) {
 // match non-first in multiple topology pools
 switchPoolOrder := []TopologyConstrainedPool{}
 switchPoolOrder = append(switchPoolOrder, validMultipleTopoPools[1], validMultipleTopoPools[0])
-poolName, topoSegment, err = FindPoolAndTopology(&switchPoolOrder, &validAccReq)
+poolName, _, topoSegment, err = FindPoolAndTopology(&switchPoolOrder, &validAccReq)
 err = checkOutput(err, poolName, topoSegment)
 if err != nil {
 t.Errorf("expected success got: (%v)", err)
 }

+// test valid dataPool return
+for i := range switchPoolOrder {
+switchPoolOrder[i].DataPoolName = "ec-" + switchPoolOrder[i].PoolName
+}
+poolName, dataPoolName, topoSegment, err := FindPoolAndTopology(&switchPoolOrder, &validAccReq)
+err = checkOutput(err, poolName, topoSegment)
+if err != nil {
+t.Errorf("expected success got: (%v)", err)
+}
+if dataPoolName != "ec-"+poolName {
+t.Errorf("expected data pool to be named ec-%s, got %s", poolName, dataPoolName)
+}
+
 // TEST: MatchTopologyForPool
 // check for non-existent pool
 _, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1+"fuzz")
@@ -12,6 +12,12 @@ RBD_CHART_NAME="ceph-csi-rbd"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 DEPLOY_TIMEOUT=600

+# ceph-csi specific variables
+NODE_LABEL_REGION="test.failure-domain/region"
+NODE_LABEL_ZONE="test.failure-domain/zone"
+REGION_VALUE="testregion"
+ZONE_VALUE="testzone"
+
 function check_deployment_status() {
 LABEL=$1
 NAMESPACE=$2
@@ -130,6 +136,13 @@ install_cephcsi_helm_charts() {
 if [ -z "$NAMESPACE" ]; then
 NAMESPACE="default"
 fi

+# label the nodes uniformly for domain information
+for node in $(kubectl get node -o jsonpath='{.items[*].metadata.name}'); do
+kubectl label node/"${node}" ${NODE_LABEL_REGION}=${REGION_VALUE}
+kubectl label node/"${node}" ${NODE_LABEL_ZONE}=${ZONE_VALUE}
+done
+
 # install ceph-csi-cephfs and ceph-csi-rbd charts
 "${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs --name ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
@@ -139,7 +152,7 @@ install_cephcsi_helm_charts() {
 # deleting configmap as a workaround to avoid configmap already present
 # issue when installing ceph-csi-rbd
 kubectl delete cm ceph-csi-config --namespace ${NAMESPACE}
-"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
+"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}"

 check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
 check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
@@ -149,6 +162,13 @@ install_cephcsi_helm_charts() {
 cleanup_cephcsi_helm_charts() {
 "${HELM}" del --purge ${CEPHFS_CHART_NAME}
 "${HELM}" del --purge ${RBD_CHART_NAME}
+
+# remove set labels
+for node in $(kubectl get node --no-headers | cut -f 1 -d ' '); do
+kubectl label node/"$node" test.failure-domain/region-
+kubectl label node/"$node" test.failure-domain/zone-
+done
+# TODO/LATER we could remove the CSI labels that would have been set as well
 }

 helm_reset() {
@@ -25,7 +25,7 @@ function deploy_rook() {

 # Check if CephBlockPool is empty
 if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
-check_rbd_stat
+check_rbd_stat ""
 fi
 }

@@ -44,25 +44,7 @@ function create_block_pool() {
 kubectl create -f "./newpool.yaml"
 rm -f "./newpool.yaml"

-for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
-echo "Checking RBD ($ROOK_BLOCK_POOL_NAME) stats... ${retry}s" && sleep 5
-
-TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
-TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
-[[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
-{ echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
-
-if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$ROOK_BLOCK_POOL_NAME" &>/dev/null; then
-echo "RBD ($ROOK_BLOCK_POOL_NAME) is successfully created..."
-break
-fi
-done
-
-if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
-echo "[Timeout] Failed to get RBD pool $ROOK_BLOCK_POOL_NAME stats"
-exit 1
-fi
-echo ""
+check_rbd_stat "$ROOK_BLOCK_POOL_NAME"
 }

 function delete_block_pool() {
@@ -122,7 +104,11 @@ function check_mds_stat() {

 function check_rbd_stat() {
 for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
+if [ -z "$1" ]; then
 RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
+else
+RBD_POOL_NAME=$1
+fi
 echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5

 TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
@@ -6,6 +6,7 @@ set -e
 export KUBE_VERSION=$1
 sudo scripts/minikube.sh up
 sudo scripts/minikube.sh deploy-rook
+sudo scripts/minikube.sh create-block-pool
 # pull docker images to speed up e2e
 sudo scripts/minikube.sh cephcsi
 sudo scripts/minikube.sh k8s-sidecar