cleanup: move pkg/ to internal/

The internal/ directory in Go has a special meaning, and indicates that
those packages are not meant for external consumption. Ceph-CSI does not
provide public APIs for other projects to consume, and there is no plan
to keep the API of the internally used packages stable.

Closes: #903
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Author:    Niels de Vos <ndevos@redhat.com>
Date:      2020-04-17 11:23:49 +02:00
Committed: mergify[bot]
Commit:    32839948ef (parent: d0abc3f5e6)

64 changed files with 37 additions and 37 deletions
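As an illustration of the internal/ convention described in the commit message: packages under internal/ remain importable from code within the same module, while the Go toolchain rejects such imports from other modules with an error along the lines of "use of internal package ... not allowed". A minimal sketch, assuming only the module path github.com/ceph/ceph-csi and the exported util.DriverVersion referenced in the files below:

package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/util"
)

func main() {
	// Allowed: this file lives inside the ceph-csi module, the parent of internal/.
	// The same import from another module would be refused by the compiler.
	fmt.Println(util.DriverVersion)
}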


@@ -0,0 +1,118 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"fmt"
"github.com/ceph/ceph-csi/internal/util"
)
// MDSMap is a representation of the mds map sub-structure returned by 'ceph fs get'
type MDSMap struct {
FilesystemName string `json:"fs_name"`
}
// CephFilesystemDetails is a representation of the main json structure returned by 'ceph fs get'
type CephFilesystemDetails struct {
ID int64 `json:"id"`
MDSMap MDSMap `json:"mdsmap"`
}
func getFscID(ctx context.Context, monitors string, cr *util.Credentials, fsName string) (int64, error) {
// ceph fs get myfs --format=json
// {"mdsmap":{...},"id":2}
var fsDetails CephFilesystemDetails
err := execCommandJSON(ctx, &fsDetails,
"ceph",
"-m", monitors,
"--id", cr.ID,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "get", fsName, "--format=json",
)
if err != nil {
return 0, err
}
return fsDetails.ID, nil
}
// CephFilesystem is a representation of the json structure returned by 'ceph fs ls'
type CephFilesystem struct {
Name string `json:"name"`
MetadataPool string `json:"metadata_pool"`
MetadataPoolID int `json:"metadata_pool_id"`
DataPools []string `json:"data_pools"`
DataPoolIDs []int `json:"data_pool_ids"`
}
func getMetadataPool(ctx context.Context, monitors string, cr *util.Credentials, fsName string) (string, error) {
// ./tbox ceph fs ls --format=json
// [{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,...},...]
var filesystems []CephFilesystem
err := execCommandJSON(ctx, &filesystems,
"ceph",
"-m", monitors,
"--id", cr.ID,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "ls", "--format=json",
)
if err != nil {
return "", err
}
for _, fs := range filesystems {
if fs.Name == fsName {
return fs.MetadataPool, nil
}
}
return "", util.ErrPoolNotFound{Pool: fsName, Err: fmt.Errorf("fsName (%s) not found in Ceph cluster", fsName)}
}
// CephFilesystemDump is a representation of the main json structure returned by 'ceph fs dump'
type CephFilesystemDump struct {
Filesystems []CephFilesystemDetails `json:"filesystems"`
}
func getFsName(ctx context.Context, monitors string, cr *util.Credentials, fscID int64) (string, error) {
// ./tbox ceph fs dump --format=json
// JSON: {...,"filesystems":[{"mdsmap":{},"id":<n>},...],...}
var fsDump CephFilesystemDump
err := execCommandJSON(ctx, &fsDump,
"ceph",
"-m", monitors,
"--id", cr.ID,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "dump", "--format=json",
)
if err != nil {
return "", err
}
for _, fs := range fsDump.Filesystems {
if fs.ID == fscID {
return fs.MDSMap.FilesystemName, nil
}
}
return "", util.ErrPoolNotFound{Pool: string(fscID), Err: fmt.Errorf("fscID (%d) not found in Ceph cluster", fscID)}
}


@@ -0,0 +1,49 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"github.com/ceph/ceph-csi/internal/util"
)
const (
cephUserPrefix = "user-"
cephEntityClientPrefix = "client."
)
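// genUserIDs returns the admin and per-volume user entity names, both prefixed
// with the "client." Ceph entity prefix.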
func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
}
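// getCephUserName returns the Ceph user name ("user-<volID>") for the given volume.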
func getCephUserName(volID volumeID) string {
return cephUserPrefix + string(volID)
}
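// deleteCephUserDeprecated removes the Ceph auth entity that plugin version 1.0.0
// created for the given volume.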
func deleteCephUserDeprecated(ctx context.Context, volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
adminID, userID := genUserIDs(adminCr, volID)
// TODO: Need to return success if userID is not found
return execCommandErr(ctx, "ceph",
"-m", volOptions.Monitors,
"-n", adminID,
"--keyfile="+adminCr.KeyFile,
"-c", util.CephConfigPath,
"auth", "rm", userID,
)
}


@@ -0,0 +1,373 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
)
// ControllerServer struct of CEPH CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
*csicommon.DefaultControllerServer
MetadataStore util.CachePersister
// A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID/volume name) return an Aborted error
VolumeLocks *util.VolumeLocks
}
type controllerCacheEntry struct {
VolOptions volumeOptions
VolumeID volumeID
}
// createBackingVolume creates the backing subvolume and on any error cleans up any created entities
func (cs *ControllerServer) createBackingVolume(ctx context.Context, volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
if err = createVolume(ctx, volOptions, cr, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
klog.Errorf(util.Log(ctx, "failed to create volume %s: %v"), volOptions.RequestName, err)
return status.Error(codes.Internal, err.Error())
}
return nil
}
// CreateVolume creates a reservation and the volume in backend, if it is not already present
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateCreateVolumeRequest(req); err != nil {
klog.Errorf(util.Log(ctx, "CreateVolumeRequest validation failed: %v"), err)
return nil, err
}
// Configuration
secret := req.GetSecrets()
requestName := req.GetName()
// Existence and conflict checks
if acquired := cs.VolumeLocks.TryAcquire(requestName); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), requestName)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, requestName)
}
defer cs.VolumeLocks.Release(requestName)
volOptions, err := newVolumeOptions(ctx, requestName, req, secret)
if err != nil {
klog.Errorf(util.Log(ctx, "validation and extraction of volume options failed: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if req.GetCapacityRange() != nil {
volOptions.Size = util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
}
// TODO need to add check for 0 volume size
vID, err := checkVolExists(ctx, volOptions, secret)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// TODO: return an error if the requested volume size is greater than the size of the found volume
if vID != nil {
volumeContext := req.GetParameters()
volumeContext["subvolumeName"] = vID.FsSubvolName
volume := &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: volumeContext,
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
{
Segments: volOptions.Topology,
},
}
}
return &csi.CreateVolumeResponse{Volume: volume}, nil
}
// Reservation
vID, err = reserveVol(ctx, volOptions, secret)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer func() {
if err != nil {
errDefer := undoVolReservation(ctx, volOptions, *vID, secret)
if errDefer != nil {
klog.Warningf(util.Log(ctx, "failed undoing reservation of volume: %s (%s)"),
requestName, errDefer)
}
}
}()
// Create a volume
err = cs.createBackingVolume(ctx, volOptions, vID, secret)
if err != nil {
return nil, err
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully created backing volume named %s for request name %s"),
vID.FsSubvolName, requestName)
volumeContext := req.GetParameters()
volumeContext["subvolumeName"] = vID.FsSubvolName
volume := &csi.Volume{
VolumeId: vID.VolumeID,
CapacityBytes: volOptions.Size,
VolumeContext: volumeContext,
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
{
Segments: volOptions.Topology,
},
}
}
return &csi.CreateVolumeResponse{Volume: volume}, nil
}
// deleteVolumeDeprecated is used to delete volumes created using version 1.0.0 of the plugin,
// that have state information stored in files or kubernetes config maps
func (cs *ControllerServer) deleteVolumeDeprecated(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
var (
volID = volumeID(req.GetVolumeId())
secrets = req.GetSecrets()
)
ce := &controllerCacheEntry{}
if err := cs.MetadataStore.Get(string(volID), ce); err != nil {
if err, ok := err.(*util.CacheEntryNotFound); ok {
klog.Warningf(util.Log(ctx, "cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)"), volID, err)
return &csi.DeleteVolumeResponse{}, nil
}
return nil, status.Error(codes.Internal, err.Error())
}
if !ce.VolOptions.ProvisionVolume {
// DeleteVolume() is forbidden for statically provisioned volumes!
klog.Warningf(util.Log(ctx, "volume %s is provisioned statically, aborting delete"), volID)
return &csi.DeleteVolumeResponse{}, nil
}
// mons may have changed since create volume,
// retrieve the latest mons and override old mons
if mon, secretsErr := util.GetMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
klog.V(3).Infof(util.Log(ctx, "overriding monitors [%q] with [%q] for volume %s"), ce.VolOptions.Monitors, mon, volID)
ce.VolOptions.Monitors = mon
}
// Deleting a volume requires admin credentials
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to retrieve admin credentials: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
}
defer cs.VolumeLocks.Release(string(volID))
if err = purgeVolumeDeprecated(ctx, volID, cr, &ce.VolOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to delete volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = deleteCephUserDeprecated(ctx, &ce.VolOptions, cr, volID); err != nil {
klog.Errorf(util.Log(ctx, "failed to delete ceph user for volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = cs.MetadataStore.Delete(string(volID)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully deleted volume %s"), volID)
return &csi.DeleteVolumeResponse{}, nil
}
// DeleteVolume deletes the volume in backend and its reservation
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.validateDeleteVolumeRequest(); err != nil {
klog.Errorf(util.Log(ctx, "DeleteVolumeRequest validation failed: %v"), err)
return nil, err
}
volID := volumeID(req.GetVolumeId())
secrets := req.GetSecrets()
// lock out parallel delete operations
if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
}
defer cs.VolumeLocks.Release(string(volID))
// Find the volume using the provided VolumeID
volOptions, vID, err := newVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
if err != nil {
// if the error is ErrPoolNotFound, the pool is already deleted and we don't
// need to worry about deleting the subvolume or omap data; return success
if _, ok := err.(util.ErrPoolNotFound); ok {
klog.Warningf(util.Log(ctx, "failed to get backend volume for %s: %v"), string(volID), err)
return &csi.DeleteVolumeResponse{}, nil
}
// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
// or partially complete (subvolume and imageOMap are garbage collected already), hence
// return success as deletion is complete
if _, ok := err.(util.ErrKeyNotFound); ok {
return &csi.DeleteVolumeResponse{}, nil
}
// ErrInvalidVolID may mean this is a 1.0.0 version volume
if _, ok := err.(ErrInvalidVolID); ok && cs.MetadataStore != nil {
return cs.deleteVolumeDeprecated(ctx, req)
}
// All errors other than ErrVolumeNotFound should return an error back to the caller
if _, ok := err.(ErrVolumeNotFound); !ok {
return nil, status.Error(codes.Internal, err.Error())
}
// If error is ErrImageNotFound then we failed to find the subvolume, but found the imageOMap
// to lead us to the image, hence the imageOMap needs to be garbage collected, by calling
// unreserve for the same
if acquired := cs.VolumeLocks.TryAcquire(volOptions.RequestName); !acquired {
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volOptions.RequestName)
}
defer cs.VolumeLocks.Release(volOptions.RequestName)
if err = undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.DeleteVolumeResponse{}, nil
}
// lock out parallel delete and create requests against the same volume name as we
// cleanup the subvolume and associated omaps for the same
if acquired := cs.VolumeLocks.TryAcquire(volOptions.RequestName); !acquired {
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volOptions.RequestName)
}
defer cs.VolumeLocks.Release(volOptions.RequestName)
// Deleting a volume requires admin credentials
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to retrieve admin credentials: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
if err = purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to delete volume %s: %v"), volID, err)
// All errors other than ErrVolumeNotFound should return an error back to the caller
if _, ok := err.(ErrVolumeNotFound); !ok {
return nil, status.Error(codes.Internal, err.Error())
}
}
if err := undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully deleted volume %s"), volID)
return &csi.DeleteVolumeResponse{}, nil
}
// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
ctx context.Context,
req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Cephfs doesn't support Block volume
for _, cap := range req.VolumeCapabilities {
if cap.GetBlock() != nil {
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
}
return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: req.VolumeCapabilities,
},
}, nil
}
// ControllerExpandVolume expands CephFS Volumes on demand based on resizer request
func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
if err := cs.validateExpandVolumeRequest(req); err != nil {
klog.Errorf(util.Log(ctx, "ControllerExpandVolumeRequest validation failed: %v"), err)
return nil, err
}
volID := req.GetVolumeId()
secret := req.GetSecrets()
// lock out parallel delete operations
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer cs.VolumeLocks.Release(volID)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
if err != nil {
klog.Errorf(util.Log(ctx, "validation and extraction of volume options failed: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
if err = createVolume(ctx, volOptions, cr, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
klog.Errorf(util.Log(ctx, "failed to expand volume %s: %v"), volumeID(volIdentifier.FsSubvolName), err)
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.ControllerExpandVolumeResponse{
CapacityBytes: RoundOffSize,
NodeExpansionRequired: false,
}, nil
}

internal/cephfs/driver.go

@@ -0,0 +1,168 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"k8s.io/klog"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
)
const (
// volIDVersion is the version number of volume ID encoding scheme
volIDVersion uint16 = 1
// csiConfigFile is the location of the CSI config file
csiConfigFile = "/etc/ceph-csi-config/config.json"
// RADOS namespace to store CSI specific objects and keys
radosNamespace = "csi"
)
// PluginFolder defines the location of ceph plugin
var PluginFolder = ""
// Driver contains the default identity,node and controller struct
type Driver struct {
cd *csicommon.CSIDriver
is *IdentityServer
ns *NodeServer
cs *ControllerServer
}
var (
// CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing
// ceph clusters across CSI instances, to differentiate omap names per CSI instance
CSIInstanceID = "default"
// volJournal is used to maintain RADOS based journals for CO generated
// VolumeName to backing CephFS subvolumes
volJournal *util.CSIJournal
)
// NewDriver returns new ceph driver
func NewDriver() *Driver {
return &Driver{}
}
// NewIdentityServer initializes an identity server for the ceph CSI driver
func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
return &IdentityServer{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
// NewControllerServer initializes a controller server for the ceph CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
MetadataStore: cachePersister,
VolumeLocks: util.NewVolumeLocks(),
}
}
// NewNodeServer initializes a node server for the ceph CSI driver.
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) *NodeServer {
return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
VolumeLocks: util.NewVolumeLocks(),
}
}
// Run starts a non-blocking gRPC controller, node and identity server for the
// ceph CSI driver, which can serve multiple parallel requests
func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
var err error
var topology map[string]string
// Configuration
PluginFolder = conf.PluginPath
if err = loadAvailableMounters(conf); err != nil {
klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
}
if err = util.WriteCephConfig(); err != nil {
klog.Fatalf("failed to write ceph configuration file: %v", err)
}
// Use passed in instance ID, if provided for omap suffix naming
if conf.InstanceID != "" {
CSIInstanceID = conf.InstanceID
}
// Get an instance of the volume journal
volJournal = util.NewCSIVolumeJournal()
// Update keys with CSI instance suffix
volJournal.SetCSIDirectorySuffix(CSIInstanceID)
// Update namespace for storing keys into a specific namespace on RADOS, in the CephFS
// metadata pool
volJournal.SetNamespace(radosNamespace)
// Initialize default library driver
fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
if fs.cd == nil {
klog.Fatalln("failed to initialize CSI driver")
}
if conf.IsControllerServer || !conf.IsNodeServer {
fs.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
})
fs.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
})
}
// Create gRPC servers
fs.is = NewIdentityServer(fs.cd)
if conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
klog.Fatalln(err)
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
}
if conf.IsControllerServer {
fs.cs = NewControllerServer(fs.cd, cachePersister)
}
if !conf.IsControllerServer && !conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
klog.Fatalln(err)
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
fs.cs = NewControllerServer(fs.cd, cachePersister)
}
server := csicommon.NewNonBlockingGRPCServer()
server.Start(conf.Endpoint, conf.HistogramOption, fs.is, fs.cs, fs.ns, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
klog.Warning("EnableGRPCMetrics is deprecated")
go util.StartMetricsServer(conf)
}
server.Wait()
}

internal/cephfs/errors.go

@@ -0,0 +1,46 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
// ErrInvalidVolID is returned when a CSI passed VolumeID is not conformant to any known volume ID
// formats
type ErrInvalidVolID struct {
err error
}
func (e ErrInvalidVolID) Error() string {
return e.err.Error()
}
// ErrNonStaticVolume is returned when a volume is detected as not being
// statically provisioned
type ErrNonStaticVolume struct {
err error
}
func (e ErrNonStaticVolume) Error() string {
return e.err.Error()
}
// ErrVolumeNotFound is returned when a subvolume is not found in CephFS
type ErrVolumeNotFound struct {
err error
}
func (e ErrVolumeNotFound) Error() string {
return e.err.Error()
}


@@ -0,0 +1,162 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"github.com/ceph/ceph-csi/internal/util"
"k8s.io/klog"
)
// volumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
// name on the backing CephFS instance
type volumeIdentifier struct {
FsSubvolName string
VolumeID string
}
/*
checkVolExists checks to determine if passed in RequestName in volOptions exists on the backend.
**NOTE:** These functions manipulate the rados omaps that hold information regarding
volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
respective CSI driver generated volume name based locks are held, as otherwise racy
access to these omaps may end up leaving them in an inconsistent state.
These functions also clean up omap reservations that are stale, i.e. when omap entries exist and
backing subvolumes are missing, or one of the omaps exists and the next is missing. This is
because the order of omap creation and deletion is the inverse of each other and protected by the
request name lock, and hence any stale omaps are leftovers from incomplete transactions and are
safe to garbage collect.
*/
func checkVolExists(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
var vid volumeIdentifier
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
imageData, err := volJournal.CheckReservation(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "")
if err != nil {
return nil, err
}
if imageData == nil {
return nil, nil
}
imageUUID := imageData.ImageUUID
vid.FsSubvolName = imageData.ImageAttributes.ImageName
_, err = getVolumeRootPathCeph(ctx, volOptions, cr, volumeID(vid.FsSubvolName))
if err != nil {
if _, ok := err.(ErrVolumeNotFound); ok {
err = volJournal.UndoReservation(ctx, volOptions.Monitors, cr, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)
return nil, err
}
return nil, err
}
// check if topology constraints match what is found
// TODO: we need an API to fetch subvolume attributes (size/datapool and others), based
// on which we can evaluate which topology this belongs to.
// TODO: CephFS topology support is postponed till we get the same
// TODO: size checks
// found a volume already available, process and return it!
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
if err != nil {
return nil, err
}
klog.V(4).Infof(util.Log(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)"),
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
return &vid, nil
}
// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName
func undoVolReservation(ctx context.Context, volOptions *volumeOptions, vid volumeIdentifier, secret map[string]string) error {
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return err
}
defer cr.DeleteCredentials()
err = volJournal.UndoReservation(ctx, volOptions.Monitors, cr, volOptions.MetadataPool,
volOptions.MetadataPool, vid.FsSubvolName, volOptions.RequestName)
return err
}
func updateTopologyConstraints(volOpts *volumeOptions) error {
// update request based on topology constrained parameters (if present)
poolName, _, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
if err != nil {
return err
}
if poolName != "" {
volOpts.Pool = poolName
volOpts.Topology = topology
}
return nil
}
// reserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and,
// to generate the volume identifier for the reserved UUID
func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
var (
vid volumeIdentifier
imageUUID string
err error
)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
err = updateTopologyConstraints(volOptions)
if err != nil {
return nil, err
}
imageUUID, vid.FsSubvolName, err = volJournal.ReserveName(ctx, volOptions.Monitors, cr, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, volOptions.NamePrefix, "", "")
if err != nil {
return nil, err
}
// generate the volume ID to return to the CO system
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
if err != nil {
return nil, err
}
klog.V(4).Infof(util.Log(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)"),
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
return &vid, nil
}


@@ -0,0 +1,60 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// IdentityServer struct of ceph CSI driver with supported methods of CSI
// identity server spec.
type IdentityServer struct {
*csicommon.DefaultIdentityServer
}
// GetPluginCapabilities returns available capabilities of the ceph driver
func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
},
},
},
{
Type: &csi.PluginCapability_VolumeExpansion_{
VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
Type: csi.PluginCapability_VolumeExpansion_ONLINE,
},
},
},
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS,
},
},
},
},
}, nil
}


@@ -0,0 +1,297 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"fmt"
"os"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
)
// NodeServer struct of ceph CSI driver with supported methods of CSI
// node server spec.
type NodeServer struct {
*csicommon.DefaultNodeServer
// A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID) return an Aborted error
VolumeLocks *util.VolumeLocks
}
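// getCredentialsForVolume returns admin credentials for dynamically provisioned
// volumes and user credentials (from the node stage secrets) for pre-provisioned ones.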
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
secrets = req.GetSecrets()
)
if volOptions.ProvisionVolume {
// The volume is provisioned dynamically, use passed in admin credentials
cr, err = util.NewAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
} else {
// The volume is pre-made, credentials are in node stage secrets
cr, err = util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}
}
return cr, nil
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
var (
volOptions *volumeOptions
)
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
// Configuration
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
volOptions, _, err := newVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
if err != nil {
if _, ok := err.(ErrInvalidVolID); !ok {
return nil, status.Error(codes.Internal, err.Error())
}
// check for pre-provisioned volumes (plugin versions > 1.0.0)
volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
if err != nil {
if _, ok := err.(ErrNonStaticVolume); !ok {
return nil, status.Error(codes.Internal, err.Error())
}
// check for volumes from plugin versions <= 1.0.0
volOptions, _, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
}
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already mounted to %s, skipping"), volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
// It's not, mount now
if err = ns.mount(ctx, volOptions, req); err != nil {
return nil, err
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully mounted volume %s to %s"), volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
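// mount stages the volume at the staging target path using the mounter selected
// for the given volume options.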
func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
cr, err := getCredentialsForVolume(volOptions, req)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to get ceph credentials for volume %s: %v"), volID, err)
return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
m, err := newMounter(volOptions)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to create mounter for volume %s: %v"), volID, err)
return status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: mounting volume %s with %s"), volID, m.name())
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to mount volume %s: %v"), volID, err)
return status.Error(codes.Internal, err.Error())
}
return nil
}
// NodePublishVolume mounts the volume mounted to the staging path to the target
// path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
mountOptions := []string{"bind", "_netdev"}
if err := util.ValidateNodePublishVolumeRequest(req); err != nil {
return nil, err
}
targetPath := req.GetTargetPath()
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
if err := util.CreateMountPoint(targetPath); err != nil {
klog.Errorf(util.Log(ctx, "failed to create mount point at %s: %v"), targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already bind-mounted to %s"), volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
// It's not, mount now
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to bind-mount volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully bind-mounted volume %s to %s"), volID, targetPath)
// #nosec - allow anyone to write inside the target path
err = os.Chmod(targetPath, 0777)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to change targetpath permission for volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
return nil, err
}
volID := req.GetVolumeId()
targetPath := req.GetTargetPath()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
// Unmount the bind-mount
if err = unmountVolume(ctx, targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = os.Remove(targetPath)
if err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unbinded volume %s from %s"), req.GetVolumeId(), targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
return nil, err
}
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
stagingTargetPath := req.GetStagingTargetPath()
// Unmount the volume
if err = unmountVolume(ctx, stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unmounted volume %s from %s"), req.GetVolumeId(), stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
},
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
},
},
},
},
}, nil
}

internal/cephfs/util.go

@@ -0,0 +1,134 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
)
type volumeID string
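// execCommand runs program with args, capturing stdout and stderr; secret values
// are stripped from the arguments before they are logged or included in errors.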
func execCommand(ctx context.Context, program string, args ...string) (stdout, stderr []byte, err error) {
var (
cmd = exec.Command(program, args...) // nolint: gosec, #nosec
sanitizedArgs = util.StripSecretInArgs(args)
stdoutBuf bytes.Buffer
stderrBuf bytes.Buffer
)
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
klog.V(4).Infof(util.Log(ctx, "cephfs: EXEC %s %s"), program, sanitizedArgs)
if err := cmd.Run(); err != nil {
if cmd.Process == nil {
return nil, nil, fmt.Errorf("cannot get process pid while running %s %v: %v: %s",
program, sanitizedArgs, err, stderrBuf.Bytes())
}
return nil, nil, fmt.Errorf("an error occurred while running (%d) %s %v: %v: %s",
cmd.Process.Pid, program, sanitizedArgs, err, stderrBuf.Bytes())
}
return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}
func execCommandErr(ctx context.Context, program string, args ...string) error {
_, _, err := execCommand(ctx, program, args...)
return err
}
//nolint: unparam
func execCommandJSON(ctx context.Context, v interface{}, program string, args ...string) error {
stdout, _, err := execCommand(ctx, program, args...)
if err != nil {
return err
}
if err = json.Unmarshal(stdout, v); err != nil {
return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, util.StripSecretInArgs(args), stdout, err)
}
return nil
}
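// pathExists reports whether the given path exists.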
func pathExists(p string) bool {
_, err := os.Stat(p)
return err == nil
}
// Controller service request validation
func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return fmt.Errorf("invalid CreateVolumeRequest: %v", err)
}
if req.GetName() == "" {
return status.Error(codes.InvalidArgument, "volume Name cannot be empty")
}
reqCaps := req.GetVolumeCapabilities()
if reqCaps == nil {
return status.Error(codes.InvalidArgument, "volume Capabilities cannot be empty")
}
for _, cap := range reqCaps {
if cap.GetBlock() != nil {
return status.Error(codes.Unimplemented, "block volume not supported")
}
}
return nil
}
func (cs *ControllerServer) validateDeleteVolumeRequest() error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return fmt.Errorf("invalid DeleteVolumeRequest: %v", err)
}
return nil
}
// Controller expand volume request validation
func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {
return fmt.Errorf("invalid ExpandVolumeRequest: %v", err)
}
if req.GetVolumeId() == "" {
return status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
}
capRange := req.GetCapacityRange()
if capRange == nil {
return status.Error(codes.InvalidArgument, "CapacityRange cannot be empty")
}
return nil
}

internal/cephfs/volume.go

@@ -0,0 +1,231 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"fmt"
"os"
"path"
"strconv"
"strings"
"github.com/ceph/ceph-csi/internal/util"
"k8s.io/klog"
)
const (
csiSubvolumeGroup = "csi"
)
var (
// cephfsInit is used to create "csi" subvolume group for the first time the csi plugin loads.
// Subvolume group create gets called every time the plugin loads; although it
// does not result in an error, it is unnecessary.
cephfsInit = false
)
func getCephRootVolumePathLocalDeprecated(volID volumeID) string {
return path.Join(getCephRootPathLocalDeprecated(volID), "csi-volumes", string(volID))
}
func getVolumeRootPathCephDeprecated(volID volumeID) string {
return path.Join("/", "csi-volumes", string(volID))
}
func getCephRootPathLocalDeprecated(volID volumeID) string {
return fmt.Sprintf("%s/controller/volumes/root-%s", PluginFolder, string(volID))
}
func getVolumeNotFoundErrorString(volID volumeID) string {
return fmt.Sprintf("Error ENOENT: Subvolume '%s' not found", string(volID))
}
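// getVolumeRootPathCeph resolves the root path of a subvolume via
// 'ceph fs subvolume getpath'.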
func getVolumeRootPathCeph(ctx context.Context, volOptions *volumeOptions, cr *util.Credentials, volID volumeID) (string, error) {
stdout, stderr, err := util.ExecCommand(
"ceph",
"fs",
"subvolume",
"getpath",
volOptions.FsName,
string(volID),
"--group_name",
csiSubvolumeGroup,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to get the rootpath for the vol %s(%s)"), string(volID), err)
if strings.Contains(string(stderr), getVolumeNotFoundErrorString(volID)) {
return "", ErrVolumeNotFound{err}
}
return "", err
}
return strings.TrimSuffix(string(stdout), "\n"), nil
}
func createVolume(ctx context.Context, volOptions *volumeOptions, cr *util.Credentials, volID volumeID, bytesQuota int64) error {
// TODO: When we support multiple fs, need to handle subvolume group create for all fs's
if !cephfsInit {
err := execCommandErr(
ctx,
"ceph",
"fs",
"subvolumegroup",
"create",
volOptions.FsName,
csiSubvolumeGroup,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to create subvolume group csi, for the vol %s(%s)"), string(volID), err)
return err
}
klog.V(4).Infof(util.Log(ctx, "cephfs: created subvolume group csi"))
cephfsInit = true
}
args := []string{
"fs",
"subvolume",
"create",
volOptions.FsName,
string(volID),
strconv.FormatInt(bytesQuota, 10),
"--group_name",
csiSubvolumeGroup,
"--mode", "777",
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix + cr.ID,
"--keyfile=" + cr.KeyFile,
}
if volOptions.Pool != "" {
args = append(args, "--pool_layout", volOptions.Pool)
}
err := execCommandErr(
ctx,
"ceph",
args[:]...)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to create subvolume %s(%s) in fs %s"), string(volID), err, volOptions.FsName)
return err
}
return nil
}
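// mountCephRoot mounts the root of the CephFS instance locally so that deprecated
// (v1.0.0) volumes can be purged from it.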
func mountCephRoot(ctx context.Context, volID volumeID, volOptions *volumeOptions, adminCr *util.Credentials) error {
cephRoot := getCephRootPathLocalDeprecated(volID)
// Root path is not set for dynamically provisioned volumes
// Access to cephfs's / is required
volOptions.RootPath = "/"
if err := util.CreateMountPoint(cephRoot); err != nil {
return err
}
m, err := newMounter(volOptions)
if err != nil {
return fmt.Errorf("failed to create mounter: %v", err)
}
if err = m.mount(ctx, cephRoot, adminCr, volOptions); err != nil {
return fmt.Errorf("error mounting ceph root: %v", err)
}
return nil
}
func unmountCephRoot(ctx context.Context, volID volumeID) {
cephRoot := getCephRootPathLocalDeprecated(volID)
if err := unmountVolume(ctx, cephRoot); err != nil {
klog.Errorf(util.Log(ctx, "failed to unmount %s with error %s"), cephRoot, err)
} else {
if err := os.Remove(cephRoot); err != nil {
klog.Errorf(util.Log(ctx, "failed to remove %s with error %s"), cephRoot, err)
}
}
}
func purgeVolumeDeprecated(ctx context.Context, volID volumeID, adminCr *util.Credentials, volOptions *volumeOptions) error {
if err := mountCephRoot(ctx, volID, volOptions, adminCr); err != nil {
return err
}
defer unmountCephRoot(ctx, volID)
var (
volRoot = getCephRootVolumePathLocalDeprecated(volID)
volRootDeleting = volRoot + "-deleting"
)
if pathExists(volRoot) {
if err := os.Rename(volRoot, volRootDeleting); err != nil {
return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err)
}
} else {
if !pathExists(volRootDeleting) {
klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s not found, assuming it to be already deleted"), volID)
return nil
}
}
if err := os.RemoveAll(volRootDeleting); err != nil {
return fmt.Errorf("failed to delete volume %s: %v", volID, err)
}
return nil
}
func purgeVolume(ctx context.Context, volID volumeID, cr *util.Credentials, volOptions *volumeOptions) error {
err := execCommandErr(
ctx,
"ceph",
"fs",
"subvolume",
"rm",
volOptions.FsName,
string(volID),
"--group_name",
csiSubvolumeGroup,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to purge subvolume %s(%s) in fs %s"), string(volID), err, volOptions.FsName)
if strings.Contains(err.Error(), getVolumeNotFoundErrorString(volID)) {
return ErrVolumeNotFound{err}
}
return err
}
return nil
}


@@ -0,0 +1,362 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"github.com/ceph/ceph-csi/internal/util"
"golang.org/x/sys/unix"
"k8s.io/klog"
)
const (
volumeMounterFuse = "fuse"
volumeMounterKernel = "kernel"
netDev = "_netdev"
)
var (
availableMounters []string
// maps a mountpoint to PID of its FUSE daemon
fusePidMap = make(map[string]int)
fusePidMapMtx sync.Mutex
fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)
)
// Version checking the running kernel and comparing it to known versions that
// have support for quota. Distributors of enterprise Linux have backported
// quota support to previous versions. This function checks if the running
// kernel is one of the versions that have the feature/fixes backported.
//
// `uname -r` (or Uname().Utsname.Release) has a format like 1.2.3-rc.vendor.
// This can be split up in the following components:
// - version (1)
// - patchlevel (2)
// - sublevel (3) - optional, defaults to 0
// - extraversion (rc) - optional, matching integers only
// - distribution (.vendor) - optional, match against whole `uname -r` string
//
// For matching multiple versions, the kernelSupport type contains a backport
// bool, which will cause matching
// version+patchlevel+sublevel+(>=extraversion)+(~distribution)
//
// In case the backport bool is false, a simple check for higher versions than
// version+patchlevel+sublevel is done.
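//
// Illustrative examples: release "5.3.0-generic" satisfies the generic 4.17+
// entry below, and "3.10.0-1062.9.1.el7.x86_64" satisfies the RHEL-7.7 backport
// entry (version 3, patchlevel 10, sublevel 0, extraversion 1062, distribution
// ".el7"), so both are reported as supporting quota.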
func kernelSupportsQuota(release string) bool {
type kernelSupport struct {
version int
patchlevel int
sublevel int
extraversion int // prefix of the part after the first "-"
distribution string // component of full extraversion
backport bool // backports have a fixed version/patchlevel/sublevel
}
quotaSupport := []kernelSupport{
{4, 17, 0, 0, "", false}, // standard 4.17+ versions
{3, 10, 0, 1062, ".el7", true}, // RHEL-7.7
}
vers := strings.Split(strings.SplitN(release, "-", 2)[0], ".")
version, err := strconv.Atoi(vers[0])
if err != nil {
klog.Errorf("failed to parse version from %s: %v", release, err)
return false
}
patchlevel, err := strconv.Atoi(vers[1])
if err != nil {
klog.Errorf("failed to parse patchlevel from %s: %v", release, err)
return false
}
sublevel := 0
if len(vers) >= 3 {
sublevel, err = strconv.Atoi(vers[2])
if err != nil {
klog.Errorf("failed to parse sublevel from %s: %v", release, err)
return false
}
}
extra := strings.SplitN(release, "-", 2)
extraversion := 0
if len(extra) == 2 {
// ignore errors, 1st component of extraversion does not need to be an int
extraversion, err = strconv.Atoi(strings.Split(extra[1], ".")[0])
if err != nil {
// "go lint" wants err to be checked...
extraversion = 0
}
}
// compare running kernel against known versions
for _, kernel := range quotaSupport {
if !kernel.backport {
// deal with the default case(s), find >= match for version, patchlevel, sublevel
if version > kernel.version || (version == kernel.version && patchlevel > kernel.patchlevel) ||
(version == kernel.version && patchlevel == kernel.patchlevel && sublevel >= kernel.sublevel) {
return true
}
} else {
// specific backport, match distribution initially
if !strings.Contains(release, kernel.distribution) {
continue
}
// strict match version, patchlevel, sublevel, and >= match extraversion
if version == kernel.version && patchlevel == kernel.patchlevel &&
sublevel == kernel.sublevel && extraversion >= kernel.extraversion {
return true
}
}
}
klog.Errorf("kernel %s does not support quota", release)
return false
}
// Load available ceph mounters installed on system into availableMounters
// Called from driver.go's Run()
func loadAvailableMounters(conf *util.Config) error {
// #nosec
fuseMounterProbe := exec.Command("ceph-fuse", "--version")
// #nosec
kernelMounterProbe := exec.Command("mount.ceph")
err := kernelMounterProbe.Run()
if err != nil {
klog.Errorf("failed to run mount.ceph %v", err)
} else {
// fetch the current running kernel info
utsname := unix.Utsname{}
err = unix.Uname(&utsname)
if err != nil {
return err
}
release := string(utsname.Release[:64])
if conf.ForceKernelCephFS || kernelSupportsQuota(release) {
klog.V(1).Infof("loaded mounter: %s", volumeMounterKernel)
availableMounters = append(availableMounters, volumeMounterKernel)
} else {
klog.V(1).Infof("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
}
}
err = fuseMounterProbe.Run()
if err != nil {
klog.Errorf("failed to run ceph-fuse %v", err)
} else {
klog.V(1).Infof("loaded mounter: %s", volumeMounterFuse)
availableMounters = append(availableMounters, volumeMounterFuse)
}
if len(availableMounters) == 0 {
return errors.New("no ceph mounters found on system")
}
return nil
}
type volumeMounter interface {
mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error
name() string
}
func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
// Get the mounter from the configuration
wantMounter := volOptions.Mounter
// Verify that it's available
var chosenMounter string
for _, availMounter := range availableMounters {
if availMounter == wantMounter {
chosenMounter = wantMounter
break
}
}
if chosenMounter == "" {
// the requested mounter is not available, fall back to the first one that is
chosenMounter = availableMounters[0]
klog.V(4).Infof("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
}
// Create the mounter
switch chosenMounter {
case volumeMounterFuse:
return &fuseMounter{}, nil
case volumeMounterKernel:
return &kernelMounter{}, nil
}
return nil, fmt.Errorf("unknown mounter '%s'", chosenMounter)
}
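// Illustrative usage of newMounter (names such as stagingPath are placeholders,
// error handling trimmed):
//
//	m, err := newMounter(volOptions)
//	if err != nil {
//		return err
//	}
//	klog.V(4).Infof("mounting volume using %s", m.name())
//	return m.mount(ctx, stagingPath, cr, volOptions)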
type fuseMounter struct{}
func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
args := []string{
mountPoint,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix + cr.ID, "--keyfile=" + cr.KeyFile,
"-r", volOptions.RootPath,
"-o", "nonempty",
}
if volOptions.FuseMountOptions != "" {
args = append(args, ","+volOptions.FuseMountOptions)
}
if volOptions.FsName != "" {
args = append(args, "--client_mds_namespace="+volOptions.FsName)
}
_, stderr, err := execCommand(ctx, "ceph-fuse", args[:]...)
if err != nil {
return err
}
// Parse the output:
// we expect "starting fuse" on stderr, which means the mount succeeded,
// and the PID of the ceph-fuse daemon, which is needed later for unmounting
match := fusePidRx.FindSubmatch(stderr)
if len(match) != 2 {
return fmt.Errorf("ceph-fuse failed: %s", stderr)
}
pid, err := strconv.Atoi(string(match[1]))
if err != nil {
return fmt.Errorf("failed to parse FUSE daemon PID: %v", err)
}
fusePidMapMtx.Lock()
fusePidMap[mountPoint] = pid
fusePidMapMtx.Unlock()
return nil
}
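// For illustration, assuming cephEntityClientPrefix is "client." and an admin
// credential with ID "admin", the resulting invocation is roughly:
//
//	ceph-fuse <mountPoint> -m <monitors> -c <cephConfigPath> \
//		-n client.admin --keyfile=<keyfile> -r <rootPath> -o nonempty
//
// and stderr is expected to contain a line similar to
//
//	ceph-fuse[<pid>]: starting fuse
//
// from which fusePidRx extracts the daemon PID used later by unmountVolume.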
func (m *fuseMounter) mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}
return mountFuse(ctx, mountPoint, cr, volOptions)
}
func (m *fuseMounter) name() string { return "Ceph FUSE driver" }
type kernelMounter struct{}
func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
return err
}
args := []string{
"-t", "ceph",
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
mountPoint,
}
optionsStr := fmt.Sprintf("name=%s,secretfile=%s", cr.ID, cr.KeyFile)
if volOptions.FsName != "" {
optionsStr += fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
}
if volOptions.KernelMountOptions != "" {
optionsStr += fmt.Sprintf(",%s", volOptions.KernelMountOptions)
}
if !strings.Contains(volOptions.KernelMountOptions, netDev) {
optionsStr += fmt.Sprintf(",%s", netDev)
}
args = append(args, "-o", optionsStr)
return execCommandErr(ctx, "mount", args[:]...)
}
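// For illustration, assuming netDev is "_netdev", a call with monitors
// "mon1:6789", root path "/volumes/csi/csi-vol-x", client ID "admin" and FsName
// "myfs" executes roughly:
//
//	modprobe ceph
//	mount -t ceph mon1:6789:/volumes/csi/csi-vol-x <mountPoint> \
//		-o name=admin,secretfile=<keyfile>,mds_namespace=myfs,_netdev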
func (m *kernelMounter) mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}
return mountKernel(ctx, mountPoint, cr, volOptions)
}
func (m *kernelMounter) name() string { return "Ceph kernel client" }
func bindMount(ctx context.Context, from, to string, readOnly bool, mntOptions []string) error {
mntOptionSli := strings.Join(mntOptions, ",")
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, from, to); err != nil {
return fmt.Errorf("failed to bind-mount %s to %s: %v", from, to, err)
}
if readOnly {
mntOptionSli += ",remount"
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, to); err != nil {
return fmt.Errorf("failed read-only remount of %s: %v", to, err)
}
}
return nil
}
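// For illustration, bindMount(ctx, "/staging/vol", "/publish/vol", true,
// []string{"bind", "ro"}) executes roughly:
//
//	mount -o bind,ro /staging/vol /publish/vol
//	mount -o bind,ro,remount /publish/vol
//
// where the second call makes the already bind-mounted target read-only.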
func unmountVolume(ctx context.Context, mountPoint string) error {
if err := execCommandErr(ctx, "umount", mountPoint); err != nil {
if strings.Contains(err.Error(), fmt.Sprintf("exit status 32: umount: %s: not mounted", mountPoint)) ||
strings.Contains(err.Error(), "No such file or directory") {
return nil
}
return err
}
fusePidMapMtx.Lock()
pid, ok := fusePidMap[mountPoint]
if ok {
delete(fusePidMap, mountPoint)
}
fusePidMapMtx.Unlock()
if ok {
p, err := os.FindProcess(pid)
if err != nil {
klog.Warningf(util.Log(ctx, "failed to find process %d: %v"), pid, err)
} else {
if _, err = p.Wait(); err != nil {
klog.Warningf(util.Log(ctx, "%d is not a child process: %v"), pid, err)
}
}
}
return nil
}

View File

@ -0,0 +1,56 @@
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"testing"
)
func init() {
}
func TestKernelSupportsQuota(t *testing.T) {
supportsQuota := []string{
"4.17.0",
"5.0.0",
"4.17.0-rc1",
"4.18.0-80.el8",
"3.10.0-1062.el7.x86_64", // 1st backport
"3.10.0-1062.4.1.el7.x86_64", // updated backport
}
noQuota := []string{
"2.6.32-754.15.3.el6.x86_64", // too old
"3.10.0-123.el7.x86_64", // too old for backport
"3.10.0-1062.4.1.el8.x86_64", // nonexisting RHEL-8 kernel
"3.11.0-123.el7.x86_64", // nonexisting RHEL-7 kernel
}
for _, kernel := range supportsQuota {
ok := kernelSupportsQuota(kernel)
if !ok {
t.Errorf("support expected for %s", kernel)
}
}
for _, kernel := range noQuota {
ok := kernelSupportsQuota(kernel)
if ok {
t.Errorf("no support expected for %s", kernel)
}
}
}

View File

@ -0,0 +1,382 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"fmt"
"strconv"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/pkg/errors"
"github.com/ceph/ceph-csi/internal/util"
)
type volumeOptions struct {
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string
RequestName string
NamePrefix string
Size int64
ClusterID string
FsName string
FscID int64
MetadataPool string
Monitors string `json:"monitors"`
Pool string `json:"pool"`
RootPath string `json:"rootPath"`
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
}
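// For illustration, a StorageClass parameters map such as
//
//	{"clusterID": "ceph-cluster-1", "fsName": "myfs", "pool": "myfs-data", "mounter": "kernel"}
//
// (values are placeholders) is parsed by newVolumeOptions below: ClusterID, FsName,
// Pool and Mounter come from the parameters, Monitors is looked up via the
// clusterID in the CSI config file, and FscID/MetadataPool are queried from the
// Ceph cluster.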
func validateNonEmptyField(field, fieldName string) error {
if field == "" {
return fmt.Errorf("parameter '%s' cannot be empty", fieldName)
}
return nil
}
func extractOptionalOption(dest *string, optionLabel string, options map[string]string) error {
opt, ok := options[optionLabel]
if !ok {
// Option not found, no error as it is optional
return nil
}
if err := validateNonEmptyField(opt, optionLabel); err != nil {
return err
}
*dest = opt
return nil
}
func extractOption(dest *string, optionLabel string, options map[string]string) error {
opt, ok := options[optionLabel]
if !ok {
return fmt.Errorf("missing required field %s", optionLabel)
}
if err := validateNonEmptyField(opt, optionLabel); err != nil {
return err
}
*dest = opt
return nil
}
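// For illustration:
//
//	var fsName string
//	err := extractOption(&fsName, "fsName", map[string]string{"fsName": "myfs"})
//	// err == nil, fsName == "myfs"
//	err = extractOption(&fsName, "fsName", map[string]string{})
//	// err != nil: "missing required field fsName"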
func validateMounter(m string) error {
switch m {
case volumeMounterFuse:
case volumeMounterKernel:
default:
return fmt.Errorf("unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
}
return nil
}
func extractMounter(dest *string, options map[string]string) error {
if err := extractOptionalOption(dest, "mounter", options); err != nil {
return err
}
if *dest != "" {
if err := validateMounter(*dest); err != nil {
return err
}
}
return nil
}
func getMonsAndClusterID(options map[string]string) (string, string, error) {
clusterID, ok := options["clusterID"]
if !ok {
err := fmt.Errorf("clusterID must be set")
return "", "", err
}
if err := validateNonEmptyField(clusterID, "clusterID"); err != nil {
return "", "", err
}
monitors, err := util.Mons(csiConfigFile, clusterID)
if err != nil {
err = errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", clusterID)
return "", "", err
}
return monitors, clusterID, err
}
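// For illustration, assuming csiConfigFile contains a typical ceph-csi entry such as
//
//	[{"clusterID": "ceph-cluster-1", "monitors": ["10.0.0.1:6789", "10.0.0.2:6789"]}]
//
// getMonsAndClusterID(map[string]string{"clusterID": "ceph-cluster-1"}) would
// return the comma-joined monitor list and "ceph-cluster-1".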
// newVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters
func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
secret map[string]string) (*volumeOptions, error) {
var (
opts volumeOptions
err error
)
volOptions := req.GetParameters()
opts.Monitors, opts.ClusterID, err = getMonsAndClusterID(volOptions)
if err != nil {
return nil, err
}
if err = extractOptionalOption(&opts.Pool, "pool", volOptions); err != nil {
return nil, err
}
if err = extractMounter(&opts.Mounter, volOptions); err != nil {
return nil, err
}
if err = extractOption(&opts.FsName, "fsName", volOptions); err != nil {
return nil, err
}
if err = extractOptionalOption(&opts.KernelMountOptions, "kernelMountOptions", volOptions); err != nil {
return nil, err
}
if err = extractOptionalOption(&opts.FuseMountOptions, "fuseMountOptions", volOptions); err != nil {
return nil, err
}
opts.RequestName = requestName
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
opts.FscID, err = getFscID(ctx, opts.Monitors, cr, opts.FsName)
if err != nil {
return nil, err
}
opts.MetadataPool, err = getMetadataPool(ctx, opts.Monitors, cr, opts.FsName)
if err != nil {
return nil, err
}
// store topology information from the request
opts.TopologyPools, opts.TopologyRequirement, err = util.GetTopologyFromRequest(req)
if err != nil {
return nil, err
}
// TODO: we need an API to fetch subvolume attributes (size/datapool and others), based
// on which we can evaluate which topology this belongs to.
// CephFS tracker: https://tracker.ceph.com/issues/44277
if opts.TopologyPools != nil {
return nil, errors.New("topology based provisioning is not supported for CephFS backed volumes")
}
opts.ProvisionVolume = true
return &opts, nil
}
// newVolumeOptionsFromVolID generates a new instance of volumeOptions and volumeIdentifier
// from the provided CSI VolumeID
func newVolumeOptionsFromVolID(ctx context.Context, volID string, volOpt, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
var (
vi util.CSIIdentifier
volOptions volumeOptions
vid volumeIdentifier
)
// Decode the VolID first, to detect older volumes or pre-provisioned volumes
// before other errors
err := vi.DecomposeCSIID(volID)
if err != nil {
err = fmt.Errorf("error decoding volume ID (%s) (%s)", err, volID)
return nil, nil, ErrInvalidVolID{err}
}
volOptions.ClusterID = vi.ClusterID
vid.VolumeID = volID
volOptions.FscID = vi.LocationID
if volOptions.Monitors, err = util.Mons(csiConfigFile, vi.ClusterID); err != nil {
return nil, nil, errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", vi.ClusterID)
}
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
return nil, nil, err
}
defer cr.DeleteCredentials()
volOptions.FsName, err = getFsName(ctx, volOptions.Monitors, cr, volOptions.FscID)
if err != nil {
return nil, nil, err
}
volOptions.MetadataPool, err = getMetadataPool(ctx, volOptions.Monitors, cr, volOptions.FsName)
if err != nil {
return nil, nil, err
}
imageAttributes, err := volJournal.GetImageAttributes(ctx, volOptions.Monitors, cr,
volOptions.MetadataPool, vi.ObjectUUID, false)
if err != nil {
return nil, nil, err
}
volOptions.RequestName = imageAttributes.RequestName
vid.FsSubvolName = imageAttributes.ImageName
if volOpt != nil {
if err = extractOptionalOption(&volOptions.Pool, "pool", volOpt); err != nil {
return nil, nil, err
}
if err = extractOptionalOption(&volOptions.KernelMountOptions, "kernelMountOptions", volOpt); err != nil {
return nil, nil, err
}
if err = extractOptionalOption(&volOptions.FuseMountOptions, "fuseMountOptions", volOpt); err != nil {
return nil, nil, err
}
if err = extractMounter(&volOptions.Mounter, volOpt); err != nil {
return nil, nil, err
}
}
volOptions.ProvisionVolume = true
volOptions.RootPath, err = getVolumeRootPathCeph(ctx, &volOptions, cr, volumeID(vid.FsSubvolName))
if err != nil {
return &volOptions, &vid, err
}
return &volOptions, &vid, nil
}
// newVolumeOptionsFromVersion1Context generates a new instance of volumeOptions and
// volumeIdentifier from the provided CSI volume context, if the provided context was
// for a volume created by version 1.0.0 (or prior) of the CSI plugin
func newVolumeOptionsFromVersion1Context(volID string, options, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
var (
opts volumeOptions
vid volumeIdentifier
provisionVolumeBool string
err error
)
// Check if monitors is part of the options; if so, this indicates a 1.0.0 (or older) volume
if err = extractOption(&opts.Monitors, "monitors", options); err != nil {
return nil, nil, err
}
// if the secret contains monitor values, they override the monitors retrieved
// from the options
mon, err := util.GetMonValFromSecret(secrets)
if err == nil && len(mon) > 0 {
opts.Monitors = mon
}
if err = extractOption(&provisionVolumeBool, "provisionVolume", options); err != nil {
return nil, nil, err
}
if opts.ProvisionVolume, err = strconv.ParseBool(provisionVolumeBool); err != nil {
return nil, nil, fmt.Errorf("failed to parse provisionVolume: %v", err)
}
if opts.ProvisionVolume {
if err = extractOption(&opts.Pool, "pool", options); err != nil {
return nil, nil, err
}
opts.RootPath = getVolumeRootPathCephDeprecated(volumeID(volID))
} else {
if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
return nil, nil, err
}
}
if err = extractMounter(&opts.Mounter, options); err != nil {
return nil, nil, err
}
vid.FsSubvolName = volID
vid.VolumeID = volID
return &opts, &vid, nil
}
// newVolumeOptionsFromStaticVolume generates a new instance of volumeOptions and
// volumeIdentifier from the provided CSI volume context, if the provided context is
// detected to be a statically provisioned volume
func newVolumeOptionsFromStaticVolume(volID string, options map[string]string) (*volumeOptions, *volumeIdentifier, error) {
var (
opts volumeOptions
vid volumeIdentifier
staticVol bool
err error
)
val, ok := options["staticVolume"]
if !ok {
return nil, nil, ErrNonStaticVolume{err}
}
if staticVol, err = strconv.ParseBool(val); err != nil {
return nil, nil, fmt.Errorf("failed to parse preProvisionedVolume: %v", err)
}
if !staticVol {
return nil, nil, ErrNonStaticVolume{err}
}
// The volume is static; ProvisionVolume records whether the plugin provisioned it,
// hence store the negation of the static flag
opts.ProvisionVolume = !staticVol
opts.Monitors, opts.ClusterID, err = getMonsAndClusterID(options)
if err != nil {
return nil, nil, err
}
if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
return nil, nil, err
}
if err = extractOption(&opts.FsName, "fsName", options); err != nil {
return nil, nil, err
}
if err = extractMounter(&opts.Mounter, options); err != nil {
return nil, nil, err
}
vid.FsSubvolName = opts.RootPath
vid.VolumeID = volID
return &opts, &vid, nil
}