cephfs: support omap store in radosnamespace

This commit adds support for storing CephFS omap data in the RADOS
namespace specified in the ceph-csi-config ConfigMap under the
cephFS.radosNamespace field.

If radosNamespace is not set, the default namespace, csi, is used.

Signed-off-by: Praveen M <m.praveen@ibm.com>
Praveen M 2024-06-05 11:07:36 +05:30
parent 809f53cb84
commit c8eea14f0e
7 changed files with 38 additions and 33 deletions
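The diffs below thread the configured namespace through the journal and reftracker code. As a rough orientation, here is a minimal, hypothetical sketch in Go of what the namespace lookup could look like: the helper name util.GetCephFSRadosNamespace, the config file constant util.CsiConfigFile, the cephFS.radosNamespace field, and the csi default all appear in this commit, but the struct layout and parsing details below are assumptions for illustration, not the actual ceph-csi implementation.

// Hypothetical sketch only; the real util.GetCephFSRadosNamespace in
// ceph-csi may be structured differently.
package cephconfig

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
)

// clusterInfo mirrors the relevant part of a ceph-csi-config entry
// (assumed layout: a JSON array of per-cluster objects).
type clusterInfo struct {
	ClusterID string `json:"clusterID"`
	CephFS    struct {
		RadosNamespace string `json:"radosNamespace"`
	} `json:"cephFS"`
}

// getCephFSRadosNamespace returns the cephFS.radosNamespace configured for
// clusterID in configFile, falling back to the default "csi" namespace when
// the field is unset.
func getCephFSRadosNamespace(configFile, clusterID string) (string, error) {
	data, err := os.ReadFile(configFile)
	if err != nil {
		return "", fmt.Errorf("failed to read config %q: %w", configFile, err)
	}

	var clusters []clusterInfo
	if err := json.Unmarshal(data, &clusters); err != nil {
		return "", fmt.Errorf("failed to parse config %q: %w", configFile, err)
	}

	for _, c := range clusters {
		if c.ClusterID != clusterID {
			continue
		}
		if c.CephFS.RadosNamespace != "" {
			return c.CephFS.RadosNamespace, nil
		}
		// Field not set: fall back to the default rados namespace.
		return "csi", nil
	}

	return "", errors.New("clusterID not found in ceph-csi config")
}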

View File

@@ -12,5 +12,6 @@
- deploy: radosNamespaceCephFS can be configured for ceph-csi-cephfs chart in [PR](https://github.com/ceph/ceph-csi/pull/4652)
- build: update ceph release to squid in [PR](https://github.com/ceph/ceph-csi/pull/4735)
- build: CentOS Stream 9 is used as OS in the container-images [PR](https://github.com/ceph/ceph-csi/pull/4735)
- cephfs: support omap data store in radosnamespace [PR](https://github.com/ceph/ceph-csi/pull/4661)
## NOTE

View File

@@ -101,6 +101,7 @@ type SubVolume struct {
VolID string // subvolume id.
FsName string // filesystem name.
SubvolumeGroup string // subvolume group name where subvolume will be created.
RadosNamespace string // rados namespace where omap data will be stored.
Pool string // pool name where subvolume will be created.
Features []string // subvolume features.
Size int64 // subvolume size.

View File

@@ -27,7 +27,6 @@ import (
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@@ -455,7 +454,7 @@ func (cs *ControllerServer) createSnapshotAndAddMapping(
return nil, err
}
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr)
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -637,7 +636,7 @@ func (cs *ControllerServer) deleteSnapshotsAndUndoReservation(ctx context.Contex
return err
}
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr)
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr)
if err != nil {
return err
}

View File

@@ -19,7 +19,6 @@ package store
import (
"context"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/ceph-csi/internal/util/reftracker"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
@@ -45,7 +44,7 @@ func AddSnapshotBackedVolumeRef(
}
defer ioctx.Destroy()
ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(volOptions.RadosNamespace)
var (
backingSnapID = volOptions.BackingSnapshotID
@@ -90,7 +89,7 @@ func AddSnapshotBackedVolumeRef(
if created && !deleted {
log.ErrorLog(ctx, "orphaned reftracker object %s (pool %s, namespace %s)",
backingSnapID, volOptions.MetadataPool, fsutil.RadosNamespace)
backingSnapID, volOptions.MetadataPool, volOptions.RadosNamespace)
}
}()
@@ -118,7 +117,7 @@ func UnrefSnapshotBackedVolume(
}
defer ioctx.Destroy()
ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(volOptions.RadosNamespace)
var (
backingSnapID = volOptions.BackingSnapshotID
@@ -159,7 +158,7 @@ func UnrefSelfInSnapshotBackedVolumes(
}
defer ioctx.Destroy()
ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(snapParentVolOptions.RadosNamespace)
return reftracker.Remove(
radoswrapper.NewIOContext(ioctx),

View File

@@ -23,7 +23,6 @@ import (
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@@ -87,8 +86,7 @@ func CheckVolExists(ctx context.Context,
setMetadata bool,
) (*VolumeIdentifier, error) {
var vid VolumeIdentifier
// Connect to cephfs' default radosNamespace (csi)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -228,8 +226,7 @@ func UndoVolReservation(
}
defer cr.DeleteCredentials()
// Connect to cephfs' default radosNamespace (csi)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return err
}
@@ -283,8 +280,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin
return nil, err
}
// Connect to cephfs' default radosNamespace (csi)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -329,8 +325,7 @@ func ReserveSnap(
err error
)
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -368,8 +363,7 @@ func UndoSnapReservation(
snapName string,
cr *util.Credentials,
) error {
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return err
}
@@ -403,8 +397,7 @@ func CheckSnapExists(
setMetadata bool,
cr *util.Credentials,
) (*SnapshotIdentifier, error) {
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}

View File

@@ -22,7 +22,6 @@ import (
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@@ -154,7 +153,7 @@ func NewVolumeGroupOptionsFromID(
return nil, nil, err
}
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, nil, err
}
@@ -189,8 +188,7 @@ func CheckVolumeGroupSnapExists(
volOptions *VolumeGroupOptions,
cr *util.Credentials,
) (*VolumeGroupSnapshotIdentifier, error) {
// Connect to cephfs' default radosNamespace (csi)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -237,8 +235,7 @@ func ReserveVolumeGroup(
)
vgsi.RequestName = volOptions.RequestName
// Connect to cephfs' default radosNamespace (csi)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -271,8 +268,7 @@ func UndoVolumeGroupReservation(
vgsi *VolumeGroupSnapshotIdentifier,
cr *util.Credentials,
) error {
// Connect to cephfs' default radosNamespace (csi)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return err
}

View File

@@ -239,6 +239,8 @@ func getVolumeOptions(vo map[string]string) (*VolumeOptions, error) {
// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
//
//nolint:gocyclo,cyclop // TODO: reduce complexity
func NewVolumeOptions(
ctx context.Context,
requestName,
@@ -314,6 +316,11 @@ func NewVolumeOptions(
return nil, err
}
opts.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, opts.ClusterID)
if err != nil {
return nil, err
}
// store topology information from the request
opts.TopologyPools, opts.TopologyRequirement, err = util.GetTopologyFromRequest(req)
if err != nil {
@@ -405,6 +412,10 @@ func NewVolumeOptionsFromVolID(
return nil, nil, fmt.Errorf("failed to fetch subvolumegroup list using clusterID (%s): %w", vi.ClusterID, err)
}
if volOptions.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, vi.ClusterID); err != nil {
return nil, nil, fmt.Errorf("failed to fetch rados namespace using clusterID (%s): %w", vi.ClusterID, err)
}
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
return nil, nil, err
@@ -434,8 +445,7 @@ func NewVolumeOptionsFromVolID(
return nil, nil, err
}
// Connect to cephfs' default radosNamespace (csi)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return nil, nil, err
}
@@ -788,6 +798,13 @@ func NewSnapshotOptionsFromID(
err)
}
if volOptions.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, vi.ClusterID); err != nil {
return &volOptions, nil, &sid, fmt.Errorf(
"failed to fetch rados namespace using clusterID (%s): %w",
vi.ClusterID,
err)
}
err = volOptions.Connect(cr)
if err != nil {
return &volOptions, nil, &sid, err
@@ -812,8 +829,7 @@ func NewSnapshotOptionsFromID(
return &volOptions, nil, &sid, err
}
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if err != nil {
return &volOptions, nil, &sid, err
}