Merge pull request #17 from ceph/devel

Sync rhs:devel with ceph:devel
openshift-ci[bot] authored 2021-08-18 13:46:00 +00:00; committed by GitHub
commit 7fa76e49c9
23 changed files with 169 additions and 66 deletions


@@ -101,6 +101,7 @@ in the Kubernetes documentation.
 | Ceph CSI Release/Branch | Container image name         | Image Tag |
 | ----------------------- | ---------------------------- | --------- |
 | devel (Branch)          | quay.io/cephcsi/cephcsi      | canary    |
+| v3.4.0 (Release)        | quay.io/cephcsi/cephcsi      | v3.4.0    |
 | v3.3.1 (Release)        | quay.io/cephcsi/cephcsi      | v3.3.1    |
 | v3.3.0 (Release)        | quay.io/cephcsi/cephcsi      | v3.3.0    |
 | v3.2.2 (Release)        | quay.io/cephcsi/cephcsi      | v3.2.2    |
@@ -158,7 +159,7 @@ welcome and encouraged to join.
 Please use the following to reach members of the community:
 - Slack: Join our [slack channel](https://cephcsi.slack.com) to discuss
-  about anything related to this project. You can join the slack by
+  anything related to this project. You can join the slack by
   this [invite link](https://bit.ly/2MeS4KY )
 - Forums: [ceph-csi](https://groups.google.com/forum/#!forum/ceph-csi)
 - Twitter: [@CephCsi](https://twitter.com/CephCsi)


@@ -48,6 +48,13 @@ ROOK_VERSION=v1.6.2
 # Provide ceph image path
 ROOK_CEPH_CLUSTER_IMAGE=docker.io/ceph/ceph:v16
+
+# CSI sidecar version
+CSI_ATTACHER_VERSION=v3.3.0
+CSI_SNAPSHOTTER_VERSION=v4.2.0
+CSI_PROVISIONER_VERSION=v3.0.0
+CSI_RESIZER_VERSION=v1.2.0
+CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0
 # e2e settings
 # - enable CEPH_CSI_RUN_ALL_TESTS when running tests with if it has root
 # permissions on the host


@@ -80,7 +80,7 @@ nodeplugin:
   registrar:
     image:
       repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
-      tag: v2.2.0
+      tag: v2.3.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -161,7 +161,7 @@ provisioner:
   provisioner:
     image:
       repository: k8s.gcr.io/sig-storage/csi-provisioner
-      tag: v2.2.2
+      tag: v3.0.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -170,7 +170,7 @@ provisioner:
     enabled: true
     image:
       repository: k8s.gcr.io/sig-storage/csi-attacher
-      tag: v3.2.1
+      tag: v3.3.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -186,7 +186,7 @@ provisioner:
   snapshotter:
     image:
       repository: k8s.gcr.io/sig-storage/csi-snapshotter
-      tag: v4.1.1
+      tag: v4.2.0
       pullPolicy: IfNotPresent
     resources: {}


@@ -1,7 +1,7 @@
 {{ if semverCompare ">=1.18.0-beta.1" .Capabilities.KubeVersion.Version }}
 apiVersion: storage.k8s.io/v1
 {{ else }}
-apiVersion: storage.k8s.io/betav1
+apiVersion: storage.k8s.io/v1beta1
 {{ end }}
 kind: CSIDriver
 metadata:


@@ -62,7 +62,7 @@ rules:
 {{- if .Values.topology.enabled }}
   - apiGroups: [""]
     resources: ["nodes"]
-    verbs: ["get", "list", watch"]
+    verbs: ["get", "list","watch"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["csinodes"]
     verbs: ["get", "list", "watch"]


@@ -102,7 +102,7 @@ nodeplugin:
   registrar:
     image:
       repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
-      tag: v2.2.0
+      tag: v2.3.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -198,7 +198,7 @@ provisioner:
   provisioner:
     image:
       repository: k8s.gcr.io/sig-storage/csi-provisioner
-      tag: v2.2.2
+      tag: v3.0.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -207,7 +207,7 @@ provisioner:
     enabled: true
     image:
       repository: k8s.gcr.io/sig-storage/csi-attacher
-      tag: v3.2.1
+      tag: v3.3.0
       pullPolicy: IfNotPresent
     resources: {}
@@ -223,7 +223,7 @@ provisioner:
   snapshotter:
     image:
       repository: k8s.gcr.io/sig-storage/csi-snapshotter
-      tag: v4.1.1
+      tag: v4.2.0
       pullPolicy: IfNotPresent
     resources: {}


@@ -43,7 +43,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: csi-provisioner
-          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
+          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
@@ -76,7 +76,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-snapshotter
-          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1
+          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
@@ -92,7 +92,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-cephfsplugin-attacher
-          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
+          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"


@@ -25,7 +25,7 @@ spec:
           # created by privileged CSI driver container.
           securityContext:
             privileged: true
-          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
+          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
           args:
             - "--v=5"
             - "--csi-address=/csi/csi.sock"


@@ -47,7 +47,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: csi-provisioner
-          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
+          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
@@ -67,7 +67,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-snapshotter
-          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1
+          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
@@ -83,7 +83,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-attacher
-          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
+          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"


@@ -28,7 +28,7 @@ spec:
           # created by privileged CSI driver container.
           securityContext:
             privileged: true
-          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
+          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
           args:
             - "--v=5"
             - "--csi-address=/csi/csi.sock"

go.mod

@@ -13,7 +13,7 @@ require (
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
     github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
     github.com/kubernetes-csi/csi-lib-utils v0.9.1
-    github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0
+    github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
     github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec
     github.com/onsi/ginkgo v1.16.4
     github.com/onsi/gomega v1.13.0

go.sum

@@ -641,8 +641,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8=
 github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M=
 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
-github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0 h1:DecASDOSUnp0ftwi4aU87joEpZfLv9iMPwNYzrGb9Lc=
-github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
+github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
+github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/libopenstorage/autopilot-api v0.6.1-0.20210128210103-5fbb67948648/go.mod h1:6JLrPbR3ZJQFbUY/+QJMl/aF00YdIrLf8/GWAplgvJs=


@@ -595,7 +595,7 @@ func (cs *ControllerServer) CreateSnapshot(
     info, err := parentVolOptions.getSubVolumeInfo(ctx, volumeID(vid.FsSubvolName))
     if err != nil {
         // Check error code value against ErrInvalidCommand to understand the cluster
-        // support it or not, its safe to evaluat as the filtering
+        // support it or not, It's safe to evaluate as the filtering
         // is already done from getSubVolumeInfo() and send out the error here.
         if errors.Is(err, ErrInvalidCommand) {
             return nil, status.Error(


@@ -345,7 +345,7 @@ volume names as requested by the CSI drivers. Hence, these need to be invoked on
 respective CSI driver generated volume name based locks are held, as otherwise racy
 access to these omaps may end up leaving them in an inconsistent state.
-These functions also cleanup omap reservations that are stale. I.e when omap entries exist and
+These functions also cleanup omap reservations that are stale. I.e. when omap entries exist and
 backing subvolumes are missing, or one of the omaps exist and the next is missing. This is
 because, the order of omap creation and deletion are inverse of each other, and protected by the
 request name lock, and hence any stale omaps are leftovers from incomplete transactions and are
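The hunk above describes the reservation protocol only in prose. Below is a minimal, illustrative Go sketch (not part of this commit; the journal type, its maps, and the reserve helper are hypothetical stand-ins for ceph-csi's omap-backed journal) showing why, under the request-name lock, an omap entry without a backing subvolume can only be a leftover of an interrupted request and is therefore safe to undo:

```go
package main

import (
    "fmt"
    "sync"
)

// journal is a hypothetical stand-in for the omap-backed reservation store.
type journal struct {
    mu           sync.Mutex        // stands in for the per-request-name lock
    reservations map[string]string // request name -> reserved subvolume name
    subvolumes   map[string]bool   // subvolumes that actually exist
}

// reserve runs entirely under the request-name lock. Omap entries are created
// before the subvolume and deleted after it, so a reservation whose backing
// subvolume is missing is a leftover from an interrupted request and is
// undone before a fresh reservation is made.
func (j *journal) reserve(reqName, subvol string) string {
    j.mu.Lock()
    defer j.mu.Unlock()

    if existing, ok := j.reservations[reqName]; ok {
        if j.subvolumes[existing] {
            return existing // healthy reservation: reuse it
        }
        delete(j.reservations, reqName) // stale omap entry: safe to undo
    }
    j.reservations[reqName] = subvol
    j.subvolumes[subvol] = true // the subvolume is created only after the omap entry
    return subvol
}

func main() {
    j := &journal{
        reservations: map[string]string{"pvc-1": "csi-vol-stale"}, // left by an interrupted request
        subvolumes:   map[string]bool{},
    }
    fmt.Println("reserved:", j.reserve("pvc-1", "csi-vol-new"))
}
```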


@@ -308,7 +308,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
         return nil, status.Error(codes.Internal, err.Error())
     }
-    util.DebugLog(ctx, "cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)
+    util.DebugLog(ctx, "cephfs: successfully unbounded volume %s from %s", req.GetVolumeId(), targetPath)
     return &csi.NodeUnpublishVolumeResponse{}, nil
 }


@@ -109,9 +109,7 @@ func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (
         // If info.BytesQuota == Infinite (in case it is not set)
         // or nil (in case the subvolume is in snapshot-retained state),
         // just continue without returning quota information.
-        // TODO: make use of subvolume "state" attribute once
-        // https://github.com/ceph/go-ceph/issues/453 is fixed.
-        if !(info.BytesQuota == fsAdmin.Infinite || info.BytesQuota == nil) {
+        if !(info.BytesQuota == fsAdmin.Infinite || info.State == fsAdmin.StateSnapRetained) {
             return nil, fmt.Errorf("subvolume %s has unsupported quota: %v", string(volID), info.BytesQuota)
         }
     } else {


@@ -97,7 +97,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
             return false, err
         }
     }
-    // snap will be create after we flatten the temporary cloned image,no
+    // snap will be created after we flatten the temporary cloned image,no
     // need to check for flatten here.
     // as the snap exists,create clone image and delete temporary snapshot
     // and add task to flatten temporary cloned image
@@ -267,7 +267,7 @@ func (rv *rbdVolume) flattenCloneImage(ctx context.Context) error {
     // error message as it need to be flatten before continuing, this may leak
     // omap entries and stale temporary snapshots in corner cases, if we reduce
     // the limit and check for the depth of the parent image clain itself we
-    // can flatten the parent images before use to avoid the stale omap entries.
+    // can flatten the parent images before used to avoid the stale omap entries.
     hardLimit := rbdHardMaxCloneDepth
     softLimit := rbdSoftMaxCloneDepth
     // choosing 2 so that we don't need to flatten the image in the request.


@@ -418,7 +418,7 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
             "restoring thick-provisioned volume %q has been interrupted, please retry", rbdVol)
         }
     }
-    // restore from snapshot imploes rbdSnap != nil
+    // restore from snapshot implies rbdSnap != nil
     // check if image depth is reached limit and requires flatten
     err := checkFlatten(ctx, rbdVol, cr)
     if err != nil {
@@ -533,7 +533,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
     return nil
 }
-// checkFlatten ensures that that the image chain depth is not reached
+// checkFlatten ensures that the image chain depth is not reached
 // hardlimit or softlimit. if the softlimit is reached it adds a task and
 // return success,the hardlimit is reached it starts a task to flatten the
 // image and return Aborted.
@@ -831,6 +831,51 @@ func (cs *ControllerServer) DeleteVolume(
     }
     defer cs.VolumeLocks.Release(rbdVol.RequestName)
+    return cleanupRBDImage(ctx, rbdVol, cr)
+}
+
+// cleanupRBDImage removes the rbd image and OMAP metadata associated with it.
+func cleanupRBDImage(ctx context.Context,
+    rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
+    mirroringInfo, err := rbdVol.getImageMirroringInfo()
+    if err != nil {
+        util.ErrorLog(ctx, err.Error())
+        return nil, status.Error(codes.Internal, err.Error())
+    }
+    // Cleanup only omap data if the following condition is met
+    // Mirroring is enabled on the image
+    // Local image is secondary
+    // Local image is in up+replaying state
+    if mirroringInfo.State == librbd.MirrorImageEnabled && !mirroringInfo.Primary {
+        // If the image is in a secondary state and its up+replaying means its
+        // an healthy secondary and the image is primary somewhere in the
+        // remote cluster and the local image is getting replayed. Delete the
+        // OMAP data generated as we cannot delete the secondary image. When
+        // the image on the primary cluster gets deleted/mirroring disabled,
+        // the image on all the remote (secondary) clusters will get
+        // auto-deleted. This helps in garbage collecting the OMAP, PVC and PV
+        // objects after failback operation.
+        localStatus, rErr := rbdVol.getLocalState()
+        if rErr != nil {
+            return nil, status.Error(codes.Internal, rErr.Error())
+        }
+        if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
+            if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
+                util.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
+                    rbdVol.RequestName, rbdVol.RbdImageName, err)
+                return nil, status.Error(codes.Internal, err.Error())
+            }
+            return &csi.DeleteVolumeResponse{}, nil
+        }
+        util.ErrorLog(ctx,
+            "secondary image status is up=%t and state=%s",
+            localStatus.Up,
+            localStatus.State)
+    }
+
     inUse, err := rbdVol.isInUse()
     if err != nil {
         util.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err)
@@ -1257,7 +1302,7 @@ func (cs *ControllerServer) DeleteSnapshot(
     rbdSnap := &rbdSnapshot{}
     if err = genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
-        // if error is ErrPoolNotFound, the pool is already deleted we dont
+        // if error is ErrPoolNotFound, the pool is already deleted we don't
         // need to worry about deleting snapshot or omap data, return success
         if errors.Is(err, util.ErrPoolNotFound) {
             util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)


@@ -50,11 +50,11 @@ var (
     // VolumeName to backing RBD images.
     volJournal *journal.Config
     snapJournal *journal.Config
-    // rbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten
+    // rbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before flatten
     // occurs.
     rbdHardMaxCloneDepth uint
-    // rbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten
+    // rbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before flatten
     // occurs.
     rbdSoftMaxCloneDepth uint
     maxSnapshotsOnImage uint
@@ -141,7 +141,7 @@ func (r *Driver) Run(conf *util.Config) {
     // general
     // In addition, we want to add the remaining modes like MULTI_NODE_READER_ONLY,
    // MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first
-    // will work those as follow up features
+    // will work those as follow-up features
     r.cd.AddVolumeCapabilityAccessModes(
         []csi.VolumeCapability_AccessMode_Mode{
             csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,


@@ -128,3 +128,24 @@ func (ri *rbdImage) getImageMirroringStatus() (*librbd.GlobalMirrorImageStatus,
     return &statusInfo, nil
 }
+
+// getLocalState returns the local state of the image.
+func (ri *rbdImage) getLocalState() (librbd.SiteMirrorImageStatus, error) {
+    localStatus := librbd.SiteMirrorImageStatus{}
+    image, err := ri.open()
+    if err != nil {
+        return localStatus, fmt.Errorf("failed to open image %q with error: %w", ri, err)
+    }
+    defer image.Close()
+    statusInfo, err := image.GetGlobalMirrorStatus()
+    if err != nil {
+        return localStatus, fmt.Errorf("failed to get image mirroring status %q with error: %w", ri, err)
+    }
+    localStatus, err = statusInfo.LocalStatus()
+    if err != nil {
+        return localStatus, fmt.Errorf("failed to get local status: %w", err)
+    }
+    return localStatus, nil
+}


@@ -321,31 +321,56 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
     case librbd.MirrorImageDisabling:
         return nil, status.Errorf(codes.Aborted, "%s is in disabling state", volumeID)
     case librbd.MirrorImageEnabled:
-        if !force && !mirroringInfo.Primary {
-            return nil, status.Error(codes.InvalidArgument, "image is in non-primary state")
-        }
-        err = rbdVol.disableImageMirroring(force)
-        if err != nil {
-            util.ErrorLog(ctx, err.Error())
-            return nil, status.Error(codes.Internal, err.Error())
-        }
-        // the image state can be still disabling once we disable the mirroring
-        // check the mirroring is disabled or not
-        mirroringInfo, err = rbdVol.getImageMirroringInfo()
-        if err != nil {
-            util.ErrorLog(ctx, err.Error())
-            return nil, status.Error(codes.Internal, err.Error())
-        }
-        if mirroringInfo.State == librbd.MirrorImageDisabling {
-            return nil, status.Errorf(codes.Aborted, "%s is in disabling state", volumeID)
-        }
+        return disableVolumeReplication(rbdVol, mirroringInfo, force)
     default:
         // TODO: use string instead of int for returning valid error message
         return nil, status.Errorf(codes.InvalidArgument, "image is in %d Mode", mirroringInfo.State)
     }
     return &replication.DisableVolumeReplicationResponse{}, nil
 }
+
+func disableVolumeReplication(rbdVol *rbdVolume,
+    mirroringInfo *librbd.MirrorImageInfo,
+    force bool) (*replication.DisableVolumeReplicationResponse, error) {
+    if !mirroringInfo.Primary {
+        // Return success if the below condition is met
+        // Local image is secondary
+        // Local image is in up+replaying state
+        // If the image is in a secondary and its state is up+replaying means
+        // its an healthy secondary and the image is primary somewhere in the
+        // remote cluster and the local image is getting replayed. Return
+        // success for the Disabling mirroring as we cannot disable mirroring
+        // on the secondary image, when the image on the primary site gets
+        // disabled the image on all the remote (secondary) clusters will get
+        // auto-deleted. This helps in garbage collecting the volume
+        // replication Kubernetes artifacts after failback operation.
+        localStatus, rErr := rbdVol.getLocalState()
+        if rErr != nil {
+            return nil, status.Error(codes.Internal, rErr.Error())
+        }
+        if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
+            return &replication.DisableVolumeReplicationResponse{}, nil
+        }
+        return nil, status.Errorf(codes.InvalidArgument,
+            "secondary image status is up=%t and state=%s",
+            localStatus.Up,
+            localStatus.State)
+    }
+    err := rbdVol.disableImageMirroring(force)
+    if err != nil {
+        return nil, status.Error(codes.Internal, err.Error())
+    }
+    // the image state can be still disabling once we disable the mirroring
+    // check the mirroring is disabled or not
+    mirroringInfo, err = rbdVol.getImageMirroringInfo()
+    if err != nil {
+        return nil, status.Error(codes.Internal, err.Error())
+    }
+    if mirroringInfo.State == librbd.MirrorImageDisabling {
+        return nil, status.Errorf(codes.Aborted, "%s is in disabling state", rbdVol.VolID)
+    }
+    return &replication.DisableVolumeReplicationResponse{}, nil
+}


@@ -162,6 +162,12 @@ if [[ "${VM_DRIVER}" == "kvm2" ]]; then
     # use vda1 instead of sda1 when running with the libvirt driver
     DISK="vda1"
 fi
+
+#configure csi sidecar version
+CSI_ATTACHER_VERSION=${CSI_ATTACHER_VERSION:-"v3.2.1"}
+CSI_SNAPSHOTTER_VERSION=${CSI_SNAPSHOTTER_VERSION:-"v4.1.1"}
+CSI_PROVISIONER_VERSION=${CSI_PROVISIONER_VERSION:-"v2.2.2"}
+CSI_RESIZER_VERSION=${CSI_RESIZER_VERSION:-"v1.2.0"}
+CSI_NODE_DRIVER_REGISTRAR_VERSION=${CSI_NODE_DRIVER_REGISTRAR_VERSION:-"v2.2.0"}
 #feature-gates for kube
 K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"ExpandCSIVolumes=true"}
@@ -278,11 +284,11 @@ cephcsi)
     ;;
 k8s-sidecar)
     echo "copying the kubernetes sidecar images"
-    copy_image_to_cluster "${K8S_IMAGE_REPO}"/csi-attacher:v3.0.2 "${K8S_IMAGE_REPO}"/csi-attacher:v3.0.2
-    copy_image_to_cluster "${K8S_IMAGE_REPO}"/csi-snapshotter:v3.0.2 $"${K8S_IMAGE_REPO}"/csi-snapshotter:v3.0.2
-    copy_image_to_cluster "${K8S_IMAGE_REPO}"/csi-provisioner:v2.0.4 "${K8S_IMAGE_REPO}"/csi-provisioner:v2.0.4
-    copy_image_to_cluster "${K8S_IMAGE_REPO}"/csi-node-driver-registrar:v2.0.1 "${K8S_IMAGE_REPO}"/csi-node-driver-registrar:v2.0.1
-    copy_image_to_cluster "${K8S_IMAGE_REPO}"/csi-resizer:v1.0.1 "${K8S_IMAGE_REPO}"/csi-resizer:v1.0.1
+    copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}" "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}"
+    copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}" "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}"
+    copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-provisioner:${CSI_PROVISIONER_VERSION}" "${K8S_IMAGE_REPO}/csi-provisioner:${CSI_PROVISIONER_VERSION}"
+    copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_VERSION}" "${K8S_IMAGE_REPO}/csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_VERSION}"
+    copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-resizer:${CSI_RESIZER_VERSION}" "${K8S_IMAGE_REPO}/csi-resizer:${CSI_RESIZER_VERSION}"
     ;;
 clean)
     ${minikube} delete

vendor/modules.txt

@@ -182,7 +182,7 @@ github.com/kubernetes-csi/csi-lib-utils/connection
 github.com/kubernetes-csi/csi-lib-utils/metrics
 github.com/kubernetes-csi/csi-lib-utils/protosanitizer
 github.com/kubernetes-csi/csi-lib-utils/rpc
-# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0
+# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
 ## explicit
 github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1
 github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1