mirror of https://github.com/ceph/ceph-csi.git
journal: create object with provided UUID
In case of async mirroring, the volume UUID is retrieved from the volume name. Instead of cephcsi generating a new UUID, it should reserve the passed UUID. This will be useful when we support both metro DR and async mirroring on a Kubernetes cluster.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
parent 1f18e876f0
commit ba84f14241
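The motivation, in short: on the secondary site of a mirrored pair, the volume handle already encodes the UUID that the primary cluster reserved, so cephcsi has to reserve exactly that UUID instead of minting a new one. A minimal sketch of recovering the UUID from such a name, assuming the usual <prefix>-<uuid> naming; parseVolUUID is hypothetical, not a ceph-csi function:

package main

import (
	"fmt"
	"strings"
)

// parseVolUUID assumes names of the form "<prefix>-<uuid>", where the
// 36-character UUID is the suffix (e.g. "csi-vol-<uuid>").
func parseVolUUID(volName string) (string, error) {
	const uuidLen = 36
	if len(volName) < uuidLen {
		return "", fmt.Errorf("volume name %q too short to contain a UUID", volName)
	}
	uuid := volName[len(volName)-uuidLen:]
	if strings.Count(uuid, "-") != 4 {
		return "", fmt.Errorf("suffix %q does not look like a UUID", uuid)
	}
	return uuid, nil
}

func main() {
	uuid, err := parseVolUUID("csi-vol-5a3c4bde-1b7c-11eb-adc1-0242ac120002")
	if err != nil {
		panic(err)
	}
	// This value would be handed to ReserveName as the new volUUID argument,
	// so the secondary cluster reserves the same identity as the primary.
	fmt.Println(uuid)
}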
@@ -234,7 +234,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
 	imageUUID, vid.FsSubvolName, err = j.ReserveName(
 		ctx, volOptions.MetadataPool, util.InvalidPoolID,
 		volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName,
-		volOptions.NamePrefix, "", "")
+		volOptions.NamePrefix, "", "", volOptions.ReservedID)
 	if err != nil {
 		return nil, err
 	}
@@ -271,7 +271,7 @@ func reserveSnap(ctx context.Context, volOptions *volumeOptions, parentSubVolNam
 	imageUUID, vid.FsSnapshotName, err = j.ReserveName(
 		ctx, volOptions.MetadataPool, util.InvalidPoolID,
 		volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName,
-		snap.NamePrefix, parentSubVolName, "")
+		snap.NamePrefix, parentSubVolName, "", snap.ReservedID)
 	if err != nil {
 		return nil, err
 	}
@@ -41,6 +41,8 @@ type cephfsSnapshot struct {
 	Pool        string
 	ClusterID   string
 	RequestName string
+	// ReservedID represents the ID reserved for a snapshot
+	ReservedID string
 }
 
 func createSnapshot(ctx context.Context, volOptions *volumeOptions, cr *util.Credentials, snapID, volID volumeID) error {
@@ -38,16 +38,18 @@ type volumeOptions struct {
 	ClusterID          string
 	FsName             string
 	FscID              int64
-	MetadataPool       string
-	Monitors           string `json:"monitors"`
-	Pool               string `json:"pool"`
-	RootPath           string `json:"rootPath"`
-	Mounter            string `json:"mounter"`
-	ProvisionVolume    bool   `json:"provisionVolume"`
-	KernelMountOptions string `json:"kernelMountOptions"`
-	FuseMountOptions   string `json:"fuseMountOptions"`
-	SubvolumeGroup     string
-	Features           []string
+	// ReservedID represents the ID reserved for a subvolume
+	ReservedID         string
+	MetadataPool       string
+	Monitors           string `json:"monitors"`
+	Pool               string `json:"pool"`
+	RootPath           string `json:"rootPath"`
+	Mounter            string `json:"mounter"`
+	ProvisionVolume    bool   `json:"provisionVolume"`
+	KernelMountOptions string `json:"kernelMountOptions"`
+	FuseMountOptions   string `json:"fuseMountOptions"`
+	SubvolumeGroup     string
+	Features           []string
 
 	// conn is a connection to the Ceph cluster obtained from a ConnPool
 	conn *util.ClusterConnection
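To make the new field's contract concrete: leaving ReservedID empty keeps today's auto-generation, while a mirrored volume pins the UUID recovered from the peer cluster. A trimmed, illustrative stub (not the actual ceph-csi type):

package main

import "fmt"

// volumeOptions is reduced to the fields relevant here.
type volumeOptions struct {
	RequestName string
	NamePrefix  string
	ReservedID  string // "" means "generate a UUID for me"
}

func main() {
	// fresh provisioning: leave ReservedID empty
	fresh := volumeOptions{RequestName: "pvc-1", NamePrefix: "csi-vol-"}
	// mirrored volume: pin the UUID recovered from the peer cluster
	mirrored := volumeOptions{
		RequestName: "pvc-1",
		NamePrefix:  "csi-vol-",
		ReservedID:  "5a3c4bde-1b7c-11eb-adc1-0242ac120002",
	}
	fmt.Println(fresh.ReservedID == "", mirrored.ReservedID)
}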
@@ -437,18 +437,24 @@ func (conn *Connection) UndoReservation(ctx context.Context,
 // reserveOMapName creates an omap with passed in oMapNamePrefix and a generated <uuid>.
 // It ensures the generated omap name does not already exist and if conflicts are detected, a set
 // number of retries with newer uuids are attempted before returning an error.
-func reserveOMapName(ctx context.Context, monitors string, cr *util.Credentials, pool, namespace, oMapNamePrefix string) (string, error) {
+func reserveOMapName(ctx context.Context, monitors string, cr *util.Credentials, pool, namespace, oMapNamePrefix, volUUID string) (string, error) {
 	var iterUUID string
 
 	maxAttempts := 5
 	attempt := 1
 	for attempt <= maxAttempts {
-		// generate a uuid for the image name
-		iterUUID = uuid.NewUUID().String()
+		if volUUID != "" {
+			iterUUID = volUUID
+		} else {
+			// generate a uuid for the image name
+			iterUUID = uuid.NewUUID().String()
+		}
 
 		err := util.CreateObject(ctx, monitors, cr, pool, namespace, oMapNamePrefix+iterUUID)
 		if err != nil {
-			if errors.Is(err, util.ErrObjectExists) {
+			// if volUUID is empty, continue with the retry, as the consumer of
+			// this function did not request an object with a specific value.
+			if volUUID == "" && errors.Is(err, util.ErrObjectExists) {
 				attempt++
 				// try again with a different uuid, for maxAttempts tries
 				util.DebugLog(ctx, "uuid (%s) conflict detected, retrying (attempt %d of %d)",
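The changed control flow is easiest to see in isolation: a provided UUID is used as-is and a name conflict on it is a hard failure, while an empty volUUID keeps the generate-and-retry behaviour. A self-contained sketch with the RADOS omap calls replaced by an in-memory map; createObject and reserveName are stand-ins, and github.com/google/uuid substitutes for the apimachinery uuid package ceph-csi uses:

package main

import (
	"errors"
	"fmt"

	"github.com/google/uuid"
)

var errObjectExists = errors.New("object exists")

// store emulates the pool/namespace that util.CreateObject writes into.
var store = map[string]bool{}

func createObject(name string) error {
	if store[name] {
		return errObjectExists
	}
	store[name] = true
	return nil
}

func reserveName(prefix, volUUID string) (string, error) {
	const maxAttempts = 5
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		iterUUID := volUUID
		if iterUUID == "" {
			// no UUID was provided, so generate a fresh one
			iterUUID = uuid.New().String()
		}
		err := createObject(prefix + iterUUID)
		if err == nil {
			return iterUUID, nil
		}
		// retrying only makes sense when we are free to pick a new UUID;
		// a caller-provided UUID that already exists is a hard failure
		if volUUID != "" || !errors.Is(err, errObjectExists) {
			return "", err
		}
	}
	return "", errors.New("uuid conflicts exceeded retry threshold")
}

func main() {
	pinned, _ := reserveName("csi.volume.", "5a3c4bde-1b7c-11eb-adc1-0242ac120002")
	fmt.Println(pinned) // exactly the provided UUID

	fresh, _ := reserveName("csi.volume.", "")
	fmt.Println(fresh) // a freshly generated UUID
}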
@@ -483,6 +489,8 @@ Input arguments:
 - namePrefix: Prefix to use when generating the image/subvolume name (suffix is an auto-generated UUID)
 - parentName: Name of the parent image/subvolume if reservation is for a snapshot (optional)
 - kmsConf: Name of the key management service used to encrypt the image (optional)
+- volUUID: UUID to be reserved instead of auto-generating one (this is
+  useful for mirroring and metro-DR)
 
 Return values:
 - string: Contains the UUID that was reserved for the passed in reqName
@@ -492,17 +500,18 @@ Return values:
 func (conn *Connection) ReserveName(ctx context.Context,
 	journalPool string, journalPoolID int64,
 	imagePool string, imagePoolID int64,
-	reqName, namePrefix, parentName, kmsConf string) (string, string, error) {
+	reqName, namePrefix, parentName, kmsConf, volUUID string) (string, string, error) {
 	// TODO: Take in-arg as ImageAttributes?
 	var (
 		snapSource bool
 		nameKeyVal string
 		cj         = conn.config
+		err        error
 	)
 
 	if parentName != "" {
 		if cj.cephSnapSourceKey == "" {
-			err := errors.New("invalid request, cephSnapSourceKey is nil")
+			err = errors.New("invalid request, cephSnapSourceKey is nil")
 			return "", "", err
 		}
 		snapSource = true
@@ -512,7 +521,7 @@ func (conn *Connection) ReserveName(ctx context.Context,
 	// NOTE: If any service loss occurs post creation of the UUID directory, and before
 	// setting the request name key (csiNameKey) to point back to the UUID directory, the
 	// UUID directory key will be leaked
-	volUUID, err := reserveOMapName(ctx, conn.monitors, conn.cr, imagePool, cj.namespace, cj.cephUUIDDirectoryPrefix)
+	volUUID, err = reserveOMapName(ctx, conn.monitors, conn.cr, imagePool, cj.namespace, cj.cephUUIDDirectoryPrefix, volUUID)
 	if err != nil {
 		return "", "", err
 	}
@@ -343,7 +343,7 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c
 
 	rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
 		ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
-		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, "")
+		rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, "", rbdSnap.ReservedID)
 	if err != nil {
 		return err
 	}
@@ -421,7 +421,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr
 
 	rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
 		ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
-		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID)
+		rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID)
 	if err != nil {
 		return err
 	}
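Putting the RBD side together, the assumed flow on a secondary cluster is: parse the UUID out of the peer's volume handle, pin it as ReservedID, then reserve; both clusters end up deriving the same csi-vol-<uuid> image name. The stubs below are illustrative, not ceph-csi internals:

package main

import "fmt"

type rbdVolume struct {
	RequestName  string
	NamePrefix   string
	ReservedID   string
	RbdImageName string
}

// reserveVol stands in for the journal reservation: it honours a
// pre-populated ReservedID and derives the image name from it.
func reserveVol(v *rbdVolume) {
	if v.ReservedID == "" {
		v.ReservedID = "freshly-generated-uuid" // placeholder for real uuid generation
	}
	v.RbdImageName = v.NamePrefix + v.ReservedID
}

func main() {
	peerUUID := "5a3c4bde-1b7c-11eb-adc1-0242ac120002" // parsed from the peer volume handle
	vol := &rbdVolume{RequestName: "pvc-1", NamePrefix: "csi-vol-", ReservedID: peerUUID}
	reserveVol(vol)
	fmt.Println(vol.RbdImageName) // same csi-vol-<uuid> on both clusters
}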