Add topology support to ceph-csi

Signed-off-by: ShyamsundarR <srangana@redhat.com>
ShyamsundarR
2020-01-24 11:26:56 -05:00
committed by mergify[bot]
parent 5475022bc3
commit 5c4abf8347
31 changed files with 1017 additions and 273 deletions

View File

@@ -76,7 +76,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
}
defer cs.VolumeLocks.Release(requestName)
volOptions, err := newVolumeOptions(ctx, requestName, req.GetParameters(), secret)
volOptions, err := newVolumeOptions(ctx, requestName, req, secret)
if err != nil {
klog.Errorf(util.Log(ctx, "validation and extraction of volume options failed: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
@@ -95,13 +95,20 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// TODO: return an error if the requested volume size is greater than that of the found volume
if vID != nil {
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: req.GetParameters(),
},
}, nil
volume := &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: req.GetParameters(),
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
{
Segments: volOptions.Topology,
},
}
}
return &csi.CreateVolumeResponse{Volume: volume}, nil
}
// Reservation
@@ -128,13 +135,20 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully created backing volume named %s for request name %s"),
vID.FsSubvolName, requestName)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: req.GetParameters(),
},
}, nil
volume := &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: req.GetParameters(),
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
{
Segments: volOptions.Topology,
},
}
}
return &csi.CreateVolumeResponse{Volume: volume}, nil
}
// deleteVolumeDeprecated is used to delete volumes created using version 1.0.0 of the plugin,
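
Both CreateVolume return paths above gained the same topology-attachment block: when volOptions.Topology is set, it is wrapped in a single-element csi.Topology list on the returned volume. The logic is duplicated verbatim in the two hunks; a small helper along these lines (hypothetical, not part of this commit) would factor it out:

// addTopology attaches the provisioned topology, if any, to the volume
// returned to the CO; a nil segment map leaves the volume unconstrained.
func addTopology(volume *csi.Volume, segments map[string]string) *csi.Volume {
	if segments != nil {
		volume.AccessibleTopology = []*csi.Topology{
			{Segments: segments},
		}
	}
	return volume
}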

View File

@@ -26,7 +26,6 @@ import (
)
const (
// volIDVersion is the version number of the volume ID encoding scheme
volIDVersion uint16 = 1
@@ -81,9 +80,9 @@ func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersis
}
// NewNodeServer initialize a node server for ceph CSI driver.
func NewNodeServer(d *csicommon.CSIDriver, t string) *NodeServer {
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) *NodeServer {
return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t),
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
VolumeLocks: util.NewVolumeLocks(),
}
}
@@ -91,14 +90,17 @@ func NewNodeServer(d *csicommon.CSIDriver, t string) *NodeServer {
// Run starts a non-blocking gRPC controller, node, and identity server for the
// ceph CSI driver, which can serve multiple parallel requests
func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
var err error
var topology map[string]string
// Configuration
PluginFolder = conf.PluginPath
if err := loadAvailableMounters(conf); err != nil {
if err = loadAvailableMounters(conf); err != nil {
klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
}
if err := util.WriteCephConfig(); err != nil {
if err = util.WriteCephConfig(); err != nil {
klog.Fatalf("failed to write ceph configuration file: %v", err)
}
@@ -137,14 +139,22 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
fs.is = NewIdentityServer(fs.cd)
if conf.IsNodeServer {
fs.ns = NewNodeServer(fs.cd, conf.Vtype)
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
klog.Fatalln(err)
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
}
if conf.IsControllerServer {
fs.cs = NewControllerServer(fs.cd, cachePersister)
}
if !conf.IsControllerServer && !conf.IsNodeServer {
fs.ns = NewNodeServer(fs.cd, conf.Vtype)
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
klog.Fatalln(err)
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
fs.cs = NewControllerServer(fs.cd, cachePersister)
}
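
Run now derives the node's topology from the configured domain labels before constructing the node server, and fails fast if the labels cannot be read. The map handed to NewNodeServer is what the node server is expected to advertise back to the CO through NodeGetInfo. A minimal sketch of that node-side half, assuming the default node server stores the map in a Topology field (field names here are illustrative, not taken from this commit):

func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	resp := &csi.NodeGetInfoResponse{NodeId: ns.NodeID} // NodeID field assumed
	if len(ns.Topology) > 0 {
		// The CO records these segments per node and matches them against
		// the AccessibleTopology that CreateVolume returns.
		resp.AccessibleTopology = &csi.Topology{Segments: ns.Topology}
	}
	return resp, nil
}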

View File

@@ -46,10 +46,7 @@ request name lock, and hence any stale omaps are leftovers from incomplete trans
hence safe to garbage collect.
*/
func checkVolExists(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
var (
vi util.CSIIdentifier
vid volumeIdentifier
)
var vid volumeIdentifier
cr, err := util.NewAdminCredentials(secret)
if err != nil {
@@ -57,41 +54,36 @@ func checkVolExists(ctx context.Context, volOptions *volumeOptions, secret map[s
}
defer cr.DeleteCredentials()
imageUUID, err := volJournal.CheckReservation(ctx, volOptions.Monitors, cr,
imageData, err := volJournal.CheckReservation(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "")
if err != nil {
return nil, err
}
if imageUUID == "" {
if imageData == nil {
return nil, nil
}
// now that we know that the reservation exists, let's get the volume name from
// the omap
_, vid.FsSubvolName, _, _, err = volJournal.GetObjectUUIDData(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, imageUUID, false)
if err != nil {
return nil, err
}
imageUUID := imageData.ImageUUID
vid.FsSubvolName = imageData.ImageAttributes.ImageName
_, err = getVolumeRootPathCeph(ctx, volOptions, cr, volumeID(vid.FsSubvolName))
if err != nil {
if _, ok := err.(ErrVolumeNotFound); ok {
err = volJournal.UndoReservation(ctx, volOptions.Monitors, cr, volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)
err = volJournal.UndoReservation(ctx, volOptions.Monitors, cr, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)
return nil, err
}
return nil, err
}
// check if topology constraints match what is found
// TODO: we need an API to fetch subvolume attributes (size/datapool and others), based
// on which we can evaluate which topology this belongs to.
// TODO: CephFS topology support is postponed until such an API is available
// TODO: size checks
// found a volume already available, process and return it!
vi = util.CSIIdentifier{
LocationID: volOptions.FscID,
EncodingVersion: volIDVersion,
ClusterID: volOptions.ClusterID,
ObjectUUID: imageUUID,
}
vid.VolumeID, err = vi.ComposeCSIID()
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
if err != nil {
return nil, err
}
@@ -111,16 +103,29 @@ func undoVolReservation(ctx context.Context, volOptions *volumeOptions, vid volu
defer cr.DeleteCredentials()
err = volJournal.UndoReservation(ctx, volOptions.Monitors, cr, volOptions.MetadataPool,
vid.FsSubvolName, volOptions.RequestName)
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)
return err
}
func updateTopologyConstraints(volOpts *volumeOptions) error {
// update request based on topology constrained parameters (if present)
poolName, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
if err != nil {
return err
}
if poolName != "" {
volOpts.Pool = poolName
volOpts.Topology = topology
}
return nil
}
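
updateTopologyConstraints delegates pool selection to util.FindPoolAndTopology, which matches the storage class's topology-constrained pools against the topology requirement the CO sent. A simplified, self-contained sketch of that matching under assumed type and field names (the real helper also distinguishes preferred from requisite topologies):

// topologyConstrainedPool pairs a pool with the topology segments it serves
// (an illustrative stand-in for util.TopologyConstrainedPool).
type topologyConstrainedPool struct {
	PoolName string
	Segments map[string]string
}

// firstMatchingPool returns the first pool whose every segment is satisfied
// by the requested topology, or "" when none matches.
func firstMatchingPool(pools []topologyConstrainedPool, requested map[string]string) (string, map[string]string) {
	for _, pool := range pools {
		matched := true
		for domain, value := range pool.Segments {
			if requested[domain] != value {
				matched = false
				break
			}
		}
		if matched {
			return pool.PoolName, pool.Segments
		}
	}
	return "", nil
}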
// reserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and
// to generate the volume identifier for the reserved UUID
func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
var (
vi util.CSIIdentifier
vid volumeIdentifier
imageUUID string
err error
@@ -132,20 +137,20 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
}
defer cr.DeleteCredentials()
imageUUID, vid.FsSubvolName, err = volJournal.ReserveName(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "")
err = updateTopologyConstraints(volOptions)
if err != nil {
return nil, err
}
imageUUID, vid.FsSubvolName, err = volJournal.ReserveName(ctx, volOptions.Monitors, cr, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, volOptions.NamePrefix, "", "")
if err != nil {
return nil, err
}
// generate the volume ID to return to the CO system
vi = util.CSIIdentifier{
LocationID: volOptions.FscID,
EncodingVersion: volIDVersion,
ClusterID: volOptions.ClusterID,
ObjectUUID: imageUUID,
}
vid.VolumeID, err = vi.ComposeCSIID()
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
if err != nil {
return nil, err
}
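
util.GenerateVolID replaces the CSIIdentifier composition that both checkVolExists and reserveVol previously performed inline; the deleted lines, shown here for reference, are conceptually what the helper now does from the same inputs:

vi = util.CSIIdentifier{
	LocationID:      volOptions.FscID,
	EncodingVersion: volIDVersion,
	ClusterID:       volOptions.ClusterID,
	ObjectUUID:      imageUUID,
}
vid.VolumeID, err = vi.ComposeCSIID()

The helper additionally takes the monitors, credentials, and a pool name (empty at these call sites), presumably so it can resolve a pool name to a location ID when one is not supplied directly.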

View File

@@ -48,6 +48,13 @@ func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.Ge
},
},
},
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS,
},
},
},
},
}, nil
}
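
Advertising VOLUME_ACCESSIBILITY_CONSTRAINTS is what signals the CO that it may populate accessibility requirements on CreateVolume. A minimal sketch of how a caller could probe for it over the identity service (a hedged example; topologyCapable is not part of this commit):

// topologyCapable reports whether the plugin advertises topology support.
func topologyCapable(ctx context.Context, ic csi.IdentityClient) (bool, error) {
	caps, err := ic.GetPluginCapabilities(ctx, &csi.GetPluginCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range caps.GetCapabilities() {
		if c.GetService().GetType() == csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS {
			return true, nil
		}
	}
	return false, nil
}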

View File

@@ -21,26 +21,30 @@ import (
"fmt"
"strconv"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/pkg/errors"
"github.com/ceph/ceph-csi/pkg/util"
)
type volumeOptions struct {
RequestName string
NamePrefix string
Size int64
ClusterID string
FsName string
FscID int64
MetadataPool string
Monitors string `json:"monitors"`
Pool string `json:"pool"`
RootPath string `json:"rootPath"`
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string
RequestName string
NamePrefix string
Size int64
ClusterID string
FsName string
FscID int64
MetadataPool string
Monitors string `json:"monitors"`
Pool string `json:"pool"`
RootPath string `json:"rootPath"`
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
}
func validateNonEmptyField(field, fieldName string) error {
@@ -127,12 +131,15 @@ func getMonsAndClusterID(options map[string]string) (string, string, error) {
// newVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters
func newVolumeOptions(ctx context.Context, requestName string, volOptions, secret map[string]string) (*volumeOptions, error) {
func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
secret map[string]string) (*volumeOptions, error) {
var (
opts volumeOptions
err error
)
volOptions := req.GetParameters()
opts.Monitors, opts.ClusterID, err = getMonsAndClusterID(volOptions)
if err != nil {
return nil, err
@@ -176,6 +183,19 @@ func newVolumeOptions(ctx context.Context, requestName string, volOptions, secre
return nil, err
}
// store topology information from the request
opts.TopologyPools, opts.TopologyRequirement, err = util.GetTopologyFromRequest(req)
if err != nil {
return nil, err
}
// TODO: we need an API to fetch subvolume attributes (size/datapool and others), based
// on which we can evaluate which topology this belongs to.
// CephFS tracker: https://tracker.ceph.com/issues/44277
if opts.TopologyPools != nil {
return nil, errors.New("topology based provisioning is not supported for CephFS backed volumes")
}
opts.ProvisionVolume = true
return &opts, nil
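
newVolumeOptions now extracts both the topology-constrained pools (from the storage class parameters) and the topology requirement (from the request itself) via util.GetTopologyFromRequest, then rejects topology-constrained provisioning outright for CephFS until subvolume attributes can be queried. For reference, a topology-aware request as a CO might deliver it once the capability above is advertised (all values illustrative):

req := &csi.CreateVolumeRequest{
	Name: "pvc-example", // hypothetical request name
	Parameters: map[string]string{
		"clusterID": "ceph-cluster-1", // illustrative parameter values
		"fsName":    "myfs",
	},
	AccessibilityRequirements: &csi.TopologyRequirement{
		Requisite: []*csi.Topology{
			{Segments: map[string]string{"topology.cephcsi.io/zone": "zone-east"}}, // label key assumed
		},
	},
}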
@@ -221,11 +241,13 @@ func newVolumeOptionsFromVolID(ctx context.Context, volID string, volOpt, secret
return nil, nil, err
}
volOptions.RequestName, vid.FsSubvolName, _, _, err = volJournal.GetObjectUUIDData(ctx, volOptions.Monitors, cr,
imageAttributes, err := volJournal.GetImageAttributes(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, vi.ObjectUUID, false)
if err != nil {
return nil, nil, err
}
volOptions.RequestName = imageAttributes.RequestName
vid.FsSubvolName = imageAttributes.ImageName
if volOpt != nil {
if err = extractOptionalOption(&volOptions.Pool, "pool", volOpt); err != nil {