rbd: remove retrieving volumeHandle from PV annotation

We added a clusterID mapping in #1946 to identify volumes
after a failover during disaster recovery. With #2314 we
moved the clusterID and poolID mapping into a ConfigMap-based
configuration, which gives us all the information required
to identify the image mappings. This commit removes the
workaround implementation done in #1946.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna 2021-12-14 14:40:36 +05:30 committed by mergify[bot]
parent e743e06748
commit 50d6ea825c
4 changed files with 7 additions and 80 deletions

@@ -74,13 +74,10 @@ secondary cluster as the PoolID and ClusterID always may not be the same.
 To solve this problem, We will have a new controller(rbdplugin controller)
 running as part of provisioner pod which watches for the PV objects. When a PV
 is created it will extract the required information from the PV spec and it
-will regenerate the OMAP data and also it will generate a new VolumeHandle
-(`newclusterID-newpoolID-volumeuniqueID`) and it adds a PV annotation
-`csi.ceph.io/volume-handle` for mapping between old VolumeHandle and new
-VolumeHandle. Whenever Ceph-CSI gets a RPC request with older VolumeHandle, it
-will check if any new VolumeHandle exists for the old VolumeHandle. If yes, it
-uses the new VolumeHandle for internal operations (to get pool name, Ceph
-monitor details from the ClusterID etc).
+will regenerate the OMAP data. Whenever Ceph-CSI gets a RPC request with older
+VolumeHandle, it will check if any new VolumeHandle exists for the old
+VolumeHandle. If yes, it uses the new VolumeHandle for internal operations (to
+get pool name, Ceph monitor details from the ClusterID etc).
 
 Currently, We are making use of watchers in node stage request to make sure
 ReadWriteOnce (RWO) PVC is mounted on a single node at a given point in time.
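
Both the removed and the added design text above have the rbdplugin controller pull the
information it needs out of the PV spec before regenerating the OMAP data. Below is a
minimal sketch of that extraction step, not the actual Ceph-CSI controller code; the
`pvInfo` type and the volume-attribute keys ("clusterID", "pool", "imageName") are
assumptions for illustration.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// pvInfo is a hypothetical container for the fields a controller would need
// before it can regenerate the OMAP data for a mirrored image.
type pvInfo struct {
	VolumeHandle string
	ClusterID    string
	Pool         string
	ImageName    string
}

// extractPVInfo pulls the relevant fields out of a CSI PV spec.
// The attribute keys used here are assumptions for illustration.
func extractPVInfo(pv *corev1.PersistentVolume) (*pvInfo, error) {
	if pv.Spec.CSI == nil {
		return nil, fmt.Errorf("PV %q is not a CSI volume", pv.Name)
	}
	attrs := pv.Spec.CSI.VolumeAttributes

	return &pvInfo{
		VolumeHandle: pv.Spec.CSI.VolumeHandle,
		ClusterID:    attrs["clusterID"],
		Pool:         attrs["pool"],
		ImageName:    attrs["imageName"],
	}, nil
}

func main() {
	pv := &corev1.PersistentVolume{
		Spec: corev1.PersistentVolumeSpec{
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					VolumeHandle: "example-volume-handle",
					VolumeAttributes: map[string]string{
						"clusterID": "site2-storage",
						"pool":      "replicapool",
						"imageName": "csi-vol-8a3c3b4f21e5",
					},
				},
			},
		},
	}

	info, err := extractPVInfo(pv)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", info)
}
```

The controller code changed in this commit reads the same `pv.Spec.CSI` fields, as seen in
`checkStaticVolume` and `reconcilePV` in the hunks below.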
@@ -92,6 +89,8 @@ To solve the ClusterID problem, If the ClusterID is different on the second
 cluster, the admin has to create a new ConfigMap for the mapped ClusterID's.
 Whenever Ceph-CSI gets a request, it will check if the ClusterID mapping exists
 and uses the mapped ClusterID to get the information like Ceph monitors etc.
+Details about creation of clusterID's mapping are available at
+[clusterID-mapping](./clusterid-mapping.md).
 
 **This design does not cover the below items:**
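
The added design text above says the driver checks whether a ClusterID mapping exists and
then uses the mapped ClusterID to look up details such as the Ceph monitors. Here is a
rough sketch of that flow, not the Ceph-CSI implementation: the in-memory mapping and
monitor table are assumptions for illustration, while the real mapping is admin-provided
through a ConfigMap as described in clusterid-mapping.md.

```go
package main

import "fmt"

// clusterIDMapping: ClusterID recorded on the primary site -> local ClusterID.
var clusterIDMapping = map[string]string{
	"site1-storage": "site2-storage",
}

// monitorsByClusterID: local ClusterID -> Ceph monitor addresses.
var monitorsByClusterID = map[string][]string{
	"site2-storage": {"10.0.0.11:6789", "10.0.0.12:6789"},
}

// monitorsFor resolves the ClusterID through the mapping (if one exists) and
// returns the monitors configured for the resulting local ClusterID.
func monitorsFor(clusterID string) ([]string, error) {
	if mapped, ok := clusterIDMapping[clusterID]; ok {
		clusterID = mapped
	}
	mons, ok := monitorsByClusterID[clusterID]
	if !ok {
		return nil, fmt.Errorf("no monitors configured for clusterID %q", clusterID)
	}

	return mons, nil
}

func main() {
	mons, err := monitorsFor("site1-storage")
	if err != nil {
		panic(err)
	}
	fmt.Println(mons) // [10.0.0.11:6789 10.0.0.12:6789]
}
```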

@@ -129,28 +129,6 @@ func checkStaticVolume(pv *corev1.PersistentVolume) bool {
 	return pv.Spec.CSI.VolumeAttributes["staticVolume"] == "true"
 }
 
-// storeVolumeIDInPV stores the new volumeID in PV object.
-func (r ReconcilePersistentVolume) storeVolumeIDInPV(
-	ctx context.Context,
-	pv *corev1.PersistentVolume,
-	newVolumeID string) error {
-	if v, ok := pv.Annotations[rbd.PVVolumeHandleAnnotationKey]; ok {
-		if v == newVolumeID {
-			return nil
-		}
-	}
-	if pv.Annotations == nil {
-		pv.Annotations = make(map[string]string)
-	}
-	if pv.Labels == nil {
-		pv.Labels = make(map[string]string)
-	}
-	pv.Labels[rbd.PVReplicatedLabelKey] = rbd.PVReplicatedLabelValue
-	pv.Annotations[rbd.PVVolumeHandleAnnotationKey] = newVolumeID
-
-	return r.client.Update(ctx, pv)
-}
-
 // reconcilePV will extract the image details from the pv spec and regenerates
 // the omap data.
 func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.Object) error {
@@ -200,12 +178,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
 		return err
 	}
 	if rbdVolID != volumeHandler {
-		err = r.storeVolumeIDInPV(ctx, pv, rbdVolID)
-		if err != nil {
-			log.ErrorLogMsg("failed to store volumeID in PV %s", err)
-
-			return err
-		}
 		log.DebugLog(ctx, "volumeHandler changed from %s to %s", volumeHandler, rbdVolID)
 	}
 
 	return nil

@@ -26,15 +26,6 @@ import (
 	"github.com/ceph/ceph-csi/internal/util/log"
 )
 
-const (
-	// PVVolumeHandleAnnotationKey is the annotation key set on the PV object.
-	PVVolumeHandleAnnotationKey = "csi.ceph.io/volume-handle"
-	// PVReplicatedLabelKey is the label key set on PV object.
-	PVReplicatedLabelKey = "csi.ceph.io/replicated-volume"
-	// PVReplicatedLabelValue is the label value set on PV object.
-	PVReplicatedLabelValue = "volume-handle-detected"
-)
-
 func validateNonEmptyField(field, fieldName, structName string) error {
 	if field == "" {
 		return fmt.Errorf("value '%s' in '%s' structure cannot be empty", fieldName, structName)

@@ -29,7 +29,6 @@ import (
 	"time"
 
 	"github.com/ceph/ceph-csi/internal/util"
-	"github.com/ceph/ceph-csi/internal/util/k8s"
 	"github.com/ceph/ceph-csi/internal/util/log"
 
 	"github.com/ceph/go-ceph/rados"
@@ -38,7 +37,6 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/timestamp"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/cloud-provider/volume/helpers"
 	mount "k8s.io/mount-utils"
@@ -1179,40 +1177,6 @@ func GenVolFromVolID(
 			return rbdVol, vErr
 		}
 	}
-	// TODO: remove extracting volumeID from PV annotations.
-
-	// If the volume details are not found in the OMAP it can be a mirrored RBD
-	// image and the OMAP is already generated and the volumeHandle might not
-	// be the same in the PV.Spec.CSI.VolumeHandle. Check the PV annotation for
-	// the new volumeHandle. If the new volumeHandle is found, generate the RBD
-	// volume structure from the new volumeHandle.
-	c, cErr := k8s.NewK8sClient()
-	if cErr != nil {
-		return vol, cErr
-	}
-
-	listOpt := metav1.ListOptions{
-		LabelSelector: PVReplicatedLabelKey,
-	}
-	pvlist, pErr := c.CoreV1().PersistentVolumes().List(context.TODO(), listOpt)
-	if pErr != nil {
-		return vol, pErr
-	}
-	for i := range pvlist.Items {
-		if pvlist.Items[i].Spec.CSI != nil && pvlist.Items[i].Spec.CSI.VolumeHandle == volumeID {
-			if v, ok := pvlist.Items[i].Annotations[PVVolumeHandleAnnotationKey]; ok {
-				log.UsefulLog(ctx, "found new volumeID %s for existing volumeID %s", v, volumeID)
-				err = vi.DecomposeCSIID(v)
-				if err != nil {
-					return vol, fmt.Errorf("%w: error decoding volume ID (%s) (%s)",
-						ErrInvalidVolID, err, v)
-				}
-
-				return generateVolumeFromVolumeID(ctx, v, vi, cr, secrets)
-			}
-		}
-	}
 
 	return vol, err
 }
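
For completeness, the sketch below illustrates why the PV listing and annotation lookup
removed above becomes unnecessary once a clusterID/poolID mapping is available: the
components of an incoming VolumeHandle can be translated to their local equivalents
directly. This is a conceptual sketch only, using the simplified
`clusterID-poolID-volumeuniqueID` notation from the design document rather than the real
Ceph-CSI volume ID encoding, and the mapping values are assumptions; the actual code path
in Ceph-CSI differs.

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed, simplified mappings; the real mapping is admin-provided through a
// ConfigMap and has a different schema.
var (
	clusterIDMapping = map[string]string{"site1cluster": "site2cluster"}
	poolIDMapping    = map[string]string{"1": "2"}
)

// translateHandle rewrites the cluster and pool components of a handle of the
// illustrative form "<clusterID>-<poolID>-<volumeUniqueID>" to their local
// equivalents; the image's unique ID stays the same on both sites.
func translateHandle(handle string) (string, error) {
	parts := strings.SplitN(handle, "-", 3)
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected handle format: %q", handle)
	}
	if c, ok := clusterIDMapping[parts[0]]; ok {
		parts[0] = c
	}
	if p, ok := poolIDMapping[parts[1]]; ok {
		parts[1] = p
	}

	return strings.Join(parts, "-"), nil
}

func main() {
	localHandle, err := translateHandle("site1cluster-1-8a3c3b4f21e5")
	if err != nil {
		panic(err)
	}
	fmt.Println(localHandle) // site2cluster-2-8a3c3b4f21e5
}
```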