Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-01-18 10:49:30 +00:00

Merge pull request #118 from ceph/devel: sync downstream devel with upstream devel

Commit cdffaf73c6
@@ -2,7 +2,7 @@
 defaults:
   actions:
-    comment:
-      bot_account: ceph-csi-bot # mergify[bot] will be commenting.
+    # mergify.io has removed bot_account from its free open source plan.
+    # comment:
+    #   bot_account: ceph-csi-bot # mergify[bot] will be commenting.
     queue:
-      merge_bot_account: ceph-csi-bot #mergify[bot] will be merging prs.
+    # merge_bot_account: ceph-csi-bot #mergify[bot] will be merging prs.
@@ -38,7 +38,7 @@ SNAPSHOT_VERSION=v6.0.1
 HELM_VERSION=v3.9.0
 
 # minikube settings
-MINIKUBE_VERSION=v1.26.0
+MINIKUBE_VERSION=v1.26.1
 VM_DRIVER=none
 CHANGE_MINIKUBE_NONE_USER=true
 
@@ -856,12 +856,13 @@ func checkVolumeResyncStatus(localStatus librbd.SiteMirrorImageStatus) error {
 
 	// If the state is Replaying means the resync is going on.
 	// Once the volume on remote cluster is demoted and resync
-	// is completed the image state will be moved to UNKNOWN .
-	if localStatus.State != librbd.MirrorImageStatusStateReplaying &&
-		localStatus.State != librbd.MirrorImageStatusStateUnknown {
+	// is completed the image state will be moved to UNKNOWN.
+	// RBD mirror daemon should be always running on the primary cluster.
+	if !localStatus.Up || (localStatus.State != librbd.MirrorImageStatusStateReplaying &&
+		localStatus.State != librbd.MirrorImageStatusStateUnknown) {
 		return fmt.Errorf(
-			"not resyncing. image is in %q state",
-			localStatus.State)
+			"not resyncing. Local status: daemon up=%t image is in %q state",
+			localStatus.Up, localStatus.State)
 	}
 
 	return nil
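Note on the change above: previously an image in the UNKNOWN state passed the check even when the rbd-mirror daemon was down; the new condition also requires localStatus.Up. A self-contained sketch of the updated check, assuming the go-ceph librbd bindings and their Ceph C library dependencies are available (the main() driver is illustrative only, not ceph-csi code):

package main

import (
	"fmt"

	librbd "github.com/ceph/go-ceph/rbd"
)

// checkVolumeResyncStatus reproduces the updated logic from the hunk above:
// resync is only treated as in progress when the rbd-mirror daemon is up
// AND the local image state is Replaying or Unknown.
func checkVolumeResyncStatus(localStatus librbd.SiteMirrorImageStatus) error {
	if !localStatus.Up || (localStatus.State != librbd.MirrorImageStatusStateReplaying &&
		localStatus.State != librbd.MirrorImageStatusStateUnknown) {
		return fmt.Errorf(
			"not resyncing. Local status: daemon up=%t image is in %q state",
			localStatus.Up, localStatus.State)
	}

	return nil
}

func main() {
	// Daemon down: now rejected, even though the Unknown state alone
	// would have passed the old check.
	fmt.Println(checkVolumeResyncStatus(librbd.SiteMirrorImageStatus{
		State: librbd.MirrorImageStatusStateUnknown,
		Up:    false,
	}))

	// Daemon up and replaying: resync is in progress, no error.
	fmt.Println(checkVolumeResyncStatus(librbd.SiteMirrorImageStatus{
		State: librbd.MirrorImageStatusStateReplaying,
		Up:    true,
	}))
}

The first call returns the daemon-down error; the second returns nil because the daemon is up and the image is replaying.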
@@ -212,10 +212,19 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 		args    librbd.SiteMirrorImageStatus
 		wantErr bool
 	}{
+		{
+			name: "test when rbd mirror daemon is not running",
+			args: librbd.SiteMirrorImageStatus{
+				State: librbd.MirrorImageStatusStateUnknown,
+				Up:    false,
+			},
+			wantErr: true,
+		},
 		{
 			name: "test for unknown state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateUnknown,
+				Up:    true,
 			},
 			wantErr: false,
 		},
@@ -223,6 +232,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for error state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateError,
+				Up:    true,
 			},
 			wantErr: true,
 		},
@@ -230,6 +240,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for syncing state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateSyncing,
+				Up:    true,
 			},
 			wantErr: true,
 		},
@@ -237,6 +248,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for starting_replay state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateStartingReplay,
+				Up:    true,
 			},
 			wantErr: true,
 		},
@@ -244,6 +256,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for replaying state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateReplaying,
+				Up:    true,
 			},
 			wantErr: false,
 		},
@@ -251,6 +264,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for stopping_replay state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateStoppingReplay,
+				Up:    true,
 			},
 			wantErr: true,
 		},
@@ -258,6 +272,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for stopped state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusStateStopped,
+				Up:    true,
 			},
 			wantErr: true,
 		},
@@ -265,6 +280,7 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
 			name: "test for invalid state",
 			args: librbd.SiteMirrorImageStatus{
 				State: librbd.MirrorImageStatusState(100),
+				Up:    true,
 			},
 			wantErr: true,
 		},
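The loop that consumes this test table is outside the diff; a conventional table-driven runner for these cases could look like the sketch below (an assumption about its shape, the actual loop in ceph-csi may differ in detail):

func TestCheckVolumeResyncStatus(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name    string
		args    librbd.SiteMirrorImageStatus
		wantErr bool
	}{
		// ... the cases shown in the hunks above ...
	}
	for _, tt := range tests {
		tt := tt // capture the range variable for the parallel subtest
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if err := checkVolumeResyncStatus(tt.args); (err != nil) != tt.wantErr {
				t.Errorf("checkVolumeResyncStatus() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}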
@@ -84,30 +84,6 @@ function delete_snapshot_crd() {
 	kubectl delete -f "${VOLUME_SNAPSHOT}" --ignore-not-found
 }
 
-# parse the kubernetes version
-# v1.17.2 -> kube_version 1 -> 1 (Major)
-# v1.17.2 -> kube_version 2 -> 17 (Minor)
-function kube_version() {
-	echo "${KUBE_VERSION}" | sed 's/^v//' | cut -d'.' -f"${1}"
-}
-
-if ! get_kube_version=$(kubectl version --short) ||
-	[[ -z "${get_kube_version}" ]]; then
-	echo "could not get Kubernetes server version"
-	echo "hint: check if you have specified the right host or port"
-	exit 1
-fi
-
-KUBE_VERSION=$(echo "${get_kube_version}" | grep "^Server Version" | cut -d' ' -f3)
-KUBE_MAJOR=$(kube_version 1)
-KUBE_MINOR=$(kube_version 2)
-
-# skip snapshot operation if kube version is less than 1.17.0
-if [[ "${KUBE_MAJOR}" -lt 1 ]] || [[ "${KUBE_MAJOR}" -eq 1 && "${KUBE_MINOR}" -lt 17 ]]; then
-	echo "skipping: Kubernetes server version is < 1.17.0"
-	exit 1
-fi
-
 case "${1:-}" in
 install)
 	install_snapshot_controller "$2"
@@ -115,13 +91,9 @@ install)
 cleanup)
 	cleanup_snapshot_controller "$2"
 	;;
-delete-crd)
-	delete_snapshot_crd
-	;;
 *)
 	echo "usage:" >&2
 	echo " $0 install" >&2
 	echo " $0 cleanup" >&2
-	echo " $0 delete-crd" >&2
 	;;
 esac
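For reference, the removed kube_version helper selected a 1-based dot-separated field after stripping the leading "v": for v1.17.2, field 1 yields the major "1" and field 2 the minor "17". An equivalent extraction in Go (purely illustrative, not ceph-csi code):

package main

import (
	"fmt"
	"strings"
)

// kubeVersionField mimics the removed shell helper: strip the leading "v"
// and return the 1-based dot-separated field of the version string.
func kubeVersionField(version string, field int) string {
	parts := strings.Split(strings.TrimPrefix(version, "v"), ".")
	if field < 1 || field > len(parts) {
		return ""
	}
	return parts[field-1]
}

func main() {
	fmt.Println(kubeVersionField("v1.17.2", 1)) // "1" (major)
	fmt.Println(kubeVersionField("v1.17.2", 2)) // "17" (minor)
}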
@@ -27,21 +27,12 @@ sudo scripts/minikube.sh create-block-pool
 # pull docker images to speed up e2e
 sudo scripts/minikube.sh cephcsi
 sudo scripts/minikube.sh k8s-sidecar
-KUBE_MAJOR=$(kube_version 1)
-KUBE_MINOR=$(kube_version 2)
-# skip snapshot operation if kube version is less than 1.17.0
-if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
-	# delete snapshot CRD created by ceph-csi in rook
-	scripts/install-snapshot.sh delete-crd
-	# install snapshot controller
-	scripts/install-snapshot.sh install
-fi
+# install snapshot controller and create snapshot CRD
+scripts/install-snapshot.sh install
 
 # functional tests
 make run-e2e E2E_ARGS="${*}"
 
-if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
-	# delete snapshot CRD
-	scripts/install-snapshot.sh cleanup
-fi
+# cleanup
+scripts/install-snapshot.sh cleanup
 sudo scripts/minikube.sh clean
@@ -35,15 +35,8 @@ sudo scripts/minikube.sh k8s-sidecar
 NAMESPACE=cephcsi-e2e-$RANDOM
 # create ns for e2e
 kubectl create ns ${NAMESPACE}
-KUBE_MAJOR=$(kube_version 1)
-KUBE_MINOR=$(kube_version 2)
-# skip snapshot operation if kube version is less than 1.17.0
-if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
-	# delete snapshot CRD created by ceph-csi in rook
-	scripts/install-snapshot.sh delete-crd
-	# install snapshot controller
-	scripts/install-snapshot.sh install
-fi
+# install snapshot controller and create snapshot CRD
+scripts/install-snapshot.sh install
 # set up helm
 scripts/install-helm.sh up
 # install cephcsi helm charts
@@ -51,12 +44,8 @@ scripts/install-helm.sh install-cephcsi --namespace ${NAMESPACE}
 # functional tests
 make run-e2e NAMESPACE="${NAMESPACE}" E2E_ARGS="--deploy-cephfs=false --deploy-rbd=false ${*}"
 
-#cleanup
-# skip snapshot operation if kube version is less than 1.17.0
-if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
-	# delete snapshot CRD
-	scripts/install-snapshot.sh cleanup
-fi
+# cleanup
+scripts/install-snapshot.sh cleanup
 scripts/install-helm.sh cleanup-cephcsi --namespace ${NAMESPACE}
 scripts/install-helm.sh clean
 kubectl delete ns ${NAMESPACE}