/*
Copyright 2019 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"errors"
	"fmt"

	"github.com/ceph/ceph-csi/internal/journal"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/k8s"
	"github.com/ceph/ceph-csi/internal/util/log"
)

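// validateNonEmptyField returns an error if the given string field of the
// named structure is empty. It is a small helper used by the validate*
// functions below.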
func validateNonEmptyField(field, fieldName, structName string) error {
	if field == "" {
		return fmt.Errorf("value '%s' in '%s' structure cannot be empty", fieldName, structName)
	}

	return nil
}

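// validateRbdSnap checks that the rbdSnapshot fields required for journal
// lookups (request name, monitors, pool, image name and cluster ID) are
// filled in.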
func validateRbdSnap(rbdSnap *rbdSnapshot) error {
	var err error

	if err = validateNonEmptyField(rbdSnap.RequestName, "RequestName", "rbdSnapshot"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdSnap.Monitors, "Monitors", "rbdSnapshot"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdSnap.Pool, "Pool", "rbdSnapshot"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdSnap.RbdImageName, "RbdImageName", "rbdSnapshot"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdSnap.ClusterID, "ClusterID", "rbdSnapshot"); err != nil {
		return err
	}

	return err
}

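// validateRbdVol checks that the rbdVolume fields required for journal
// lookups (request name, monitors, pool and cluster ID) are filled in and
// that the requested volume size is non-zero.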
func validateRbdVol(rbdVol *rbdVolume) error {
	var err error

	if err = validateNonEmptyField(rbdVol.RequestName, "RequestName", "rbdVolume"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdVol.Monitors, "Monitors", "rbdVolume"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdVol.Pool, "Pool", "rbdVolume"); err != nil {
		return err
	}

	if err = validateNonEmptyField(rbdVol.ClusterID, "ClusterID", "rbdVolume"); err != nil {
		return err
	}

	if rbdVol.VolSize == 0 {
		return errors.New("value 'VolSize' in 'rbdVolume' structure cannot be 0")
	}

	return err
}

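// getEncryptionConfig returns the KMS ID and the encryption type (block,
// file or none) that is configured on the given rbdVolume.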
func getEncryptionConfig(rbdVol *rbdVolume) (string, util.EncryptionType) {
	switch {
	case rbdVol.isBlockEncrypted():
		return rbdVol.blockEncryption.GetID(), util.EncryptionTypeBlock
	case rbdVol.isFileEncrypted():
		return rbdVol.fileEncryption.GetID(), util.EncryptionTypeFile
	default:
		return "", util.EncryptionTypeNone
	}
}

/*
checkSnapCloneExists, and its counterpart checkVolExists, check whether the
passed in rbdSnapshot or rbdVolume exists on the backend.

**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI driver generated snapshot or volume name
based locks are held, as otherwise racy access to these omaps may end up
leaving them in an inconsistent state.

These functions need enough information about cluster and pool (i.e. Monitors,
Pool, IDs filled in) to operate. They further require that the RequestName
element of the structure has a valid value, in order to determine if the said
RequestName already exists on the backend.

These functions populate the snapshot or the image name, its attributes and the
CSI snapshot/volume ID for the same when successful.

These functions also clean up omap reservations that are stale, i.e. when omap
entries exist and backing images or snapshots are missing, or one of the omaps
exists and the next is missing. This is because the order of omap creation and
deletion are inverse of each other and protected by the request name lock;
hence any stale omaps are leftovers from incomplete transactions and are safe
to garbage collect.
*/
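// A minimal sketch of the expected call pattern (the caller shown here is
// hypothetical; the exact lock variable and request handling live in the CSI
// controller server):
//
//	if ok := snapshotNameLocks.TryAcquire(req.GetName()); !ok {
//		return nil, status.Errorf(codes.Aborted, "snapshot %s is in use", req.GetName())
//	}
//	defer snapshotNameLocks.Release(req.GetName())
//	found, err := checkSnapCloneExists(ctx, parentVol, rbdSnap, cr)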
func checkSnapCloneExists(
	ctx context.Context,
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials,
) (bool, error) {
	err := validateRbdSnap(rbdSnap)
	if err != nil {
		return false, err
	}

	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return false, err
	}
	defer j.Destroy()

	snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeNone)
	if err != nil {
		return false, err
	}
	if snapData == nil {
		return false, nil
	}
	snapUUID := snapData.ImageUUID
	rbdSnap.RbdSnapName = snapData.ImageAttributes.ImageName
	rbdSnap.ImageID = snapData.ImageAttributes.ImageID

	// it should never happen that this disagrees, but check
	if rbdSnap.Pool != snapData.ImagePool {
		return false, fmt.Errorf("stored snapshot pool (%s) and expected snapshot pool (%s) mismatch",
			snapData.ImagePool, rbdSnap.Pool)
	}

	vol := generateVolFromSnap(rbdSnap)
	defer vol.Destroy()
	err = vol.Connect(cr)
	if err != nil {
		return false, err
	}
	vol.ReservedID = snapUUID
	// Fetch on-disk image attributes
	err = vol.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			err = parentVol.deleteSnapshot(ctx, rbdSnap)
			if err != nil {
				if !errors.Is(err, ErrSnapNotFound) {
					log.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)

					return false, err
				}
			}
			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)
		}

		return false, err
	}

	// The snapshot creation transaction is rolled forward if the rbd clone
	// image representing the snapshot is found. Any failures until the image
	// is found cause a rollback of the snapshot creation transaction.
	// Code from here on rolls the transaction forward.

	rbdSnap.CreatedAt = vol.CreatedAt
	rbdSnap.VolSize = vol.VolSize
	// found a snapshot already available, process and return its information
	rbdSnap.VolID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, snapData.ImagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, snapUUID, volIDVersion)
	if err != nil {
		return false, err
	}

	// check if the snapshot exists; if not, create it
	err = vol.checkSnapExists(rbdSnap)
	if errors.Is(err, ErrSnapNotFound) {
		// create snapshot
		sErr := vol.createSnapshot(ctx, rbdSnap)
		if sErr != nil {
			log.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

			return false, err
		}
	}
	if err != nil {
		return false, err
	}

	if vol.ImageID == "" {
		sErr := vol.getImageID()
		if sErr != nil {
			log.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

			return false, err
		}
		sErr = j.StoreImageID(ctx, vol.JournalPool, vol.ReservedID, vol.ImageID)
		if sErr != nil {
			log.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)

			return false, err
		}
	}

	log.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
		rbdSnap.VolID, rbdSnap.RbdSnapName, rbdSnap.RequestName)

	return true, nil
}

/*
Check the comment on checkSnapCloneExists to understand how this function
behaves.

**NOTE:** These functions manipulate the rados omaps that hold information
regarding volume names as requested by the CSI drivers. Hence, these need to be
invoked only when the respective CSI snapshot or volume name based locks are
held, as otherwise racy access to these omaps may end up leaving the omaps in
an inconsistent state.

parentVol is required to check whether the clone was created from the requested
parent image. When temporary snapshots and clones were created for the volume
(i.e. the content source is a volume), we need to recover from stale entries or
complete the pending operations.
*/
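// parentVol may be nil for a plain (non-cloned) volume; a non-nil parentVol is
// only needed when the volume is cloned from another image. Illustrative
// (hypothetical) callers:
//
//	found, err := rbdVol.Exists(ctx, nil)       // regular volume
//	found, err := rbdVol.Exists(ctx, parentVol) // volume cloned from parentVol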
func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, error) {
	err := validateRbdVol(rv)
	if err != nil {
		return false, err
	}

	kmsID, encryptionType := getEncryptionConfig(rv)

	j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds)
	if err != nil {
		return false, err
	}
	defer j.Destroy()

	imageData, err := j.CheckReservation(
		ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID, encryptionType)
	if err != nil {
		return false, err
	}
	if imageData == nil {
		return false, nil
	}

	rv.ReservedID = imageData.ImageUUID
	rv.RbdImageName = imageData.ImageAttributes.ImageName
	rv.ImageID = imageData.ImageAttributes.ImageID
	// check if topology constraints match what is found
	_, _, rv.Topology, err = util.MatchPoolAndTopology(rv.TopologyPools, rv.TopologyRequirement,
		imageData.ImagePool)
	if err != nil {
		// TODO check if need any undo operation here, or ErrVolNameConflict
		return false, err
	}
	// update Pool, if it was topology constrained
	if rv.Topology != nil {
		rv.Pool = imageData.ImagePool
	}

	// NOTE: Return volsize should be on-disk volsize, not request vol size, so
	// save it for size checks before fetching image data
	requestSize := rv.VolSize //nolint:ifshort // FIXME: rename and split function into helpers
	// Fetch on-disk image attributes and compare against request
	err = rv.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// Need to check cloned info here not on createvolume,
			if parentVol != nil {
				found, cErr := rv.checkCloneImage(ctx, parentVol)
				switch {
				case found && cErr == nil:
					return true, nil
				case cErr != nil:
					return false, cErr
				}
			}
			err = j.UndoReservation(ctx, rv.JournalPool, rv.Pool,
				rv.RbdImageName, rv.RequestName)

			return false, err
		}

		return false, err
	}

	err = rv.repairImageID(ctx, j, false)
	if err != nil {
		return false, err
	}

	// size checks
	if rv.VolSize < requestSize {
		return false, fmt.Errorf("%w: image with the same name (%s) but with different size already exists",
			ErrVolNameConflict, rv.RbdImageName)
	}
	// TODO: We should also ensure image features and format is the same

	// found a volume already available, process and return it!
	rv.VolID, err = util.GenerateVolID(ctx, rv.Monitors, rv.conn.Creds, imageData.ImagePoolID, rv.Pool,
		rv.ClusterID, rv.ReservedID, volIDVersion)
	if err != nil {
		return false, err
	}

	if parentVol != nil {
		err = parentVol.copyEncryptionConfig(&rv.rbdImage, true)
		if err != nil {
			log.ErrorLog(ctx, err.Error())

			return false, err
		}
	}

	log.DebugLog(ctx, "found existing volume (%s) with image name (%s) for request (%s)",
		rv.VolID, rv.RbdImageName, rv.RequestName)

	return true, nil
}

// repairImageID checks if rv.ImageID is already available (if so, it was
// fetched from the journal); in case it is missing, the imageID is obtained
// and stored in the journal.
// If force is set to true, the latest imageID is fetched again and
// added/updated in the OMAP.
func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection, force bool) error {
	if force {
		// reset the imageID so that we can fetch the latest imageID from the ceph cluster.
		rv.ImageID = ""
	}

	if rv.ImageID != "" {
		return nil
	}

	err := rv.getImageID()
	if err != nil {
		log.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)

		return err
	}
	err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
	if err != nil {
		log.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)

		return err
	}

	return nil
}

// reserveSnap is a helper routine to request a rbdSnapshot name reservation and generate the
// volume ID for the generated name.
func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
	var err error

	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
	if err != nil {
		return err
	}

	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()

	kmsID, encryptionType := getEncryptionConfig(rbdVol)

	rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
		ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner,
		"", encryptionType)
	if err != nil {
		return err
	}

	rbdSnap.VolID, err = util.GenerateVolID(ctx, rbdSnap.Monitors, cr, imagePoolID, rbdSnap.Pool,
		rbdSnap.ClusterID, rbdSnap.ReservedID, volIDVersion)
	if err != nil {
		return err
	}

	log.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdSnap.VolID, rbdSnap.RbdSnapName, rbdSnap.RequestName)

	return nil
}

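// updateTopologyConstraints updates the Pool, DataPool and Topology of the
// rbdVolume based on the topology constraints in the request. When rbdSnap is
// not nil, the snapshot pool is matched against the topology constraints
// instead.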
func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
	var err error
	if rbdSnap != nil {
		// check if topology constraints matches snapshot pool
		var poolName string
		var dataPoolName string

		poolName, dataPoolName, rbdVol.Topology, err = util.MatchPoolAndTopology(rbdVol.TopologyPools,
			rbdVol.TopologyRequirement, rbdSnap.Pool)
		if err != nil {
			return err
		}

		// update Pool, if it was topology constrained
		if rbdVol.Topology != nil {
			rbdVol.Pool = poolName
			rbdVol.DataPool = dataPoolName
		}

		return nil
	}
	// update request based on topology constrained parameters (if present)
	poolName, dataPoolName, topology, err := util.FindPoolAndTopology(rbdVol.TopologyPools, rbdVol.TopologyRequirement)
	if err != nil {
		return err
	}
	if poolName != "" {
		rbdVol.Pool = poolName
		rbdVol.DataPool = dataPoolName
		rbdVol.Topology = topology
	}

	return nil
}

// reserveVol is a helper routine to request a rbdVolume name reservation and generate the
// volume ID for the generated name.
func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	var err error

	err = updateTopologyConstraints(rbdVol, rbdSnap)
	if err != nil {
		return err
	}

	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdVol.Monitors, rbdVol.JournalPool, rbdVol.Pool, cr)
	if err != nil {
		return err
	}

	kmsID, encryptionType := getEncryptionConfig(rbdVol)

	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()

	rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
		ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "", encryptionType)
	if err != nil {
		return err
	}

	rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
		rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
	if err != nil {
		return err
	}

	log.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)

	return nil
}

// undoSnapReservation is a helper routine to undo a name reservation for rbdSnapshot.
func undoSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()

	err = j.UndoReservation(
		ctx, rbdSnap.JournalPool, rbdSnap.Pool, rbdSnap.RbdSnapName,
		rbdSnap.RequestName)

	return err
}

// undoVolReservation is a helper routine to undo a name reservation for rbdVolume.
func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return err
	}
	defer j.Destroy()

	err = j.UndoReservation(ctx, rbdVol.JournalPool, rbdVol.Pool,
		rbdVol.RbdImageName, rbdVol.RequestName)

	return err
}

// RegenerateJournal regenerates the omap data for the static volumes. The
// input parameters imageName, volumeID, pool, journalPool and requestName
// are present in the PV.Spec.CSI object; based on those we can regenerate
// the complete omap mapping between imageName and volumeID.
//
// RegenerateJournal performs the below operations:
//   - Extract clusterID and Mons after checking the clusterID mapping
//   - Extract the journalPool and pool parameters from volumeAttributes
//   - Extract the optional volumeNamePrefix, kmsID and owner parameters from volumeAttributes
//   - Extract information from the volumeID
//   - Get the pool ID from the pool name
//   - Extract the uuid from the volumeID
//   - Reserve omap data
//   - Generate a new volume handle
//
// The volume handle won't remain the same, as it contains the poolID,
// clusterID etc. which are not the same across clusters.
// nolint:gocyclo,cyclop,nestif // TODO: reduce complexity
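// A hypothetical example of the volumeAttributes taken from a statically
// provisioned PV (the keys match the ones read below; the values are
// illustrative only):
//
//	volumeAttributes := map[string]string{
//		"pool":             "replicapool",
//		"journalPool":      "replicapool",
//		"volumeNamePrefix": "csi-vol-",
//	}
//	volID, err := RegenerateJournal(volumeAttributes, claimName, volumeID,
//		requestName, owner, clusterName, false, cr)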
func RegenerateJournal(
	volumeAttributes map[string]string,
	claimName,
	volumeID,
	requestName,
	owner,
	clusterName string,
	setMetadata bool,
	cr *util.Credentials,
) (string, error) {
	ctx := context.Background()
	var (
		vi             util.CSIIdentifier
		rbdVol         *rbdVolume
		kmsID          string
		encryptionType util.EncryptionType
		err            error
		ok             bool
	)

	rbdVol = &rbdVolume{}
	rbdVol.VolID = volumeID
	rbdVol.ClusterName = clusterName
	rbdVol.EnableMetadata = setMetadata

	err = vi.DecomposeCSIID(rbdVol.VolID)
	if err != nil {
		return "", fmt.Errorf("%w: error decoding volume ID (%s) (%s)",
			ErrInvalidVolID, err, rbdVol.VolID)
	}

	rbdVol.Owner = owner

	kmsID, encryptionType, err = ParseEncryptionOpts(volumeAttributes, util.EncryptionTypeNone)
	if err != nil {
		return "", err
	}

	rbdVol.Monitors, rbdVol.ClusterID, err = util.FetchMappedClusterIDAndMons(ctx, vi.ClusterID)
	if err != nil {
		return "", err
	}

	if rbdVol.Pool, ok = volumeAttributes["pool"]; !ok {
		return "", errors.New("required 'pool' parameter missing in volume attributes")
	}
	err = rbdVol.Connect(cr)
	if err != nil {
		return "", err
	}
	rbdVol.JournalPool = volumeAttributes["journalPool"]
	if rbdVol.JournalPool == "" {
		rbdVol.JournalPool = rbdVol.Pool
	}
	volJournal = journal.NewCSIVolumeJournal(CSIInstanceID)
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return "", err
	}
	defer j.Destroy()

	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdVol.Monitors, rbdVol.JournalPool, rbdVol.Pool, cr)
	if err != nil {
		return "", err
	}

	rbdVol.RequestName = requestName
	rbdVol.NamePrefix = volumeAttributes["volumeNamePrefix"]

	imageData, err := j.CheckReservation(
		ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, encryptionType)
	if err != nil {
		return "", err
	}

	if imageData != nil {
		rbdVol.ReservedID = imageData.ImageUUID
		rbdVol.ImageID = imageData.ImageAttributes.ImageID
		rbdVol.Owner = imageData.ImageAttributes.Owner
		rbdVol.RbdImageName = imageData.ImageAttributes.ImageName
		if rbdVol.ImageID == "" {
			err = rbdVol.storeImageID(ctx, j)
			if err != nil {
				return "", err
			}
		}
		if rbdVol.Owner != owner {
			err = j.ResetVolumeOwner(ctx, rbdVol.JournalPool, rbdVol.ReservedID, owner)
			if err != nil {
				return "", err
			}
		}
		// Update Metadata on reattach of the same old PV
		parameters := k8s.PrepareVolumeMetadata(claimName, owner, "")
		err = rbdVol.setAllMetadata(parameters)
		if err != nil {
			return "", fmt.Errorf("failed to set volume metadata: %w", err)
		}
		// As the omap already exists for this image ID return nil.
		rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
			rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
		if err != nil {
			return "", err
		}

		return rbdVol.VolID, nil
	}

	rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
		ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "", encryptionType)
	if err != nil {
		return "", err
	}

	defer func() {
		if err != nil {
			undoErr := j.UndoReservation(ctx, rbdVol.JournalPool, rbdVol.Pool,
				rbdVol.RbdImageName, rbdVol.RequestName)
			if undoErr != nil {
				log.ErrorLog(ctx, "failed to undo reservation %s: %v", rbdVol, undoErr)
			}
		}
	}()
	rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
		rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
	if err != nil {
		return "", err
	}

	log.DebugLog(ctx, "re-generated Volume ID (%s) and image name (%s) for request name (%s)",
		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
	if rbdVol.ImageID == "" {
		err = rbdVol.storeImageID(ctx, j)
		if err != nil {
			return "", err
		}
	}

	return rbdVol.VolID, nil
}

// storeImageID retrieves the image ID and stores it in OMAP.
func (rv *rbdVolume) storeImageID(ctx context.Context, j *journal.Connection) error {
	err := rv.getImageID()
	if err != nil {
		log.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)

		return err
	}
	err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
	if err != nil {
		log.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)

		return err
	}

	return nil
}