cephfs: volumemounter probe

The driver now probes for the ceph-fuse and ceph kernel clients every
time it is about to mount a CephFS volume.

This also affects CreateVolume/DeleteVolume, where mounting was
hard-coded to the ceph kernel client until now; the mounter
configuration and probing are now honored.
gman 2018-08-14 11:19:41 +02:00
parent 43b9f9aeaa
commit c515a013d3
5 changed files with 92 additions and 37 deletions
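
For context, the probing introduced here boils down to executing each
mounter's binary and checking the command's exit status. A minimal
standalone sketch of the same idea (stdlib only; an illustration, not
part of the commit itself):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same probes the driver now runs: ceph-fuse is detected via its
	// --version flag, the kernel client via mount.ceph. As in the
	// commit, availability is judged purely by the exit status.
	if exec.Command("ceph-fuse", "--version").Run() == nil {
		fmt.Println("fuse mounter available")
	}
	if exec.Command("mount.ceph").Run() == nil {
		fmt.Println("kernel mounter available")
	}
}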

@@ -46,14 +46,6 @@ var (
 	DefaultVolumeMounter string
 )
 
-func getVolumeMounterByProbing() string {
-	if execCommandAndValidate("ceph-fuse", "--version") == nil {
-		return volumeMounter_fuse
-	} else {
-		return volumeMounter_kernel
-	}
-}
-
 func NewCephFSDriver() *cephfsDriver {
 	return &cephfsDriver{}
 }
@@ -97,7 +89,12 @@ func (fs *cephfsDriver) Run(driverName, nodeId, endpoint, volumeMounter string)
 			DefaultVolumeMounter = volumeMounter
 		}
 	} else {
-		DefaultVolumeMounter = getVolumeMounterByProbing()
+		availableMounters := getAvailableMounters()
+		if len(availableMounters) == 0 {
+			glog.Fatal("no ceph mounters found on system")
+		}
+
+		DefaultVolumeMounter = availableMounters[0]
 	}
 
 	glog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)

@@ -134,9 +134,12 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	m := newMounter(volOptions)
-	glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name())
+	m, err := newMounter(volOptions)
+	if err != nil {
+		glog.Errorf("failed to create mounter for volume %s: %v", volId, err)
+	}
+	glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name())
 
 	if err = m.mount(stagingTargetPath, cr, volOptions, volId); err != nil {
 		glog.Errorf("failed to mount volume %s: %v", volId, err)
 		return nil, status.Error(codes.Internal, err.Error())

@@ -97,23 +97,6 @@ func storeCephCredentials(volId volumeID, cr *credentials) error {
 	return nil
 }
 
-func newMounter(volOptions *volumeOptions) volumeMounter {
-	mounter := volOptions.Mounter
-
-	if mounter == "" {
-		mounter = DefaultVolumeMounter
-	}
-
-	switch mounter {
-	case volumeMounter_fuse:
-		return &fuseMounter{}
-	case volumeMounter_kernel:
-		return &kernelMounter{}
-	}
-
-	return nil
-}
-
 //
 // Controller service request validation
 //

@@ -60,7 +60,12 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
 	// Access to cephfs's / is required
 	volOptions.RootPath = "/"
 
-	if err := mountKernel(cephRoot, adminCr, volOptions, volId); err != nil {
+	m, err := newMounter(volOptions)
+	if err != nil {
+		return fmt.Errorf("failed to create mounter: %v", err)
+	}
+
+	if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
 		return fmt.Errorf("error mounting ceph root: %v", err)
 	}
 
@@ -91,22 +96,28 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
 	return nil
 }
 
-func purgeVolume(volId volumeID, cr *credentials, volOptions *volumeOptions) error {
-	// Root path is not set for dynamically provisioned volumes
-	volOptions.RootPath = "/"
-
+func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions) error {
 	var (
-		root            = getCephRootPath_local(volId)
+		cephRoot        = getCephRootPath_local(volId)
 		volRoot         = getCephRootVolumePath_local(volId)
 		volRootDeleting = volRoot + "-deleting"
 	)
 
-	if err := createMountPoint(root); err != nil {
+	if err := createMountPoint(cephRoot); err != nil {
 		return err
 	}
 
-	if err := mountKernel(root, cr, volOptions, volId); err != nil {
-		return err
+	// Root path is not set for dynamically provisioned volumes
+	// Access to cephfs's / is required
+	volOptions.RootPath = "/"
+
+	m, err := newMounter(volOptions)
+	if err != nil {
+		return fmt.Errorf("failed to create mounter: %v", err)
+	}
+
+	if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
+		return fmt.Errorf("error mounting ceph root: %v", err)
 	}
 
 	defer func() {

@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"fmt"
 	"os"
+	"os/exec"
 )
 
 const (
@@ -113,3 +114,63 @@ func unmountVolume(mountPoint string) error {
 func createMountPoint(root string) error {
 	return os.MkdirAll(root, 0750)
 }
+
+func getAvailableMounters() []string {
+	var ms []string
+
+	fuseMounterProbe := exec.Command("ceph-fuse", "--version")
+	kernelMounterProbe := exec.Command("mount.ceph")
+
+	if fuseMounterProbe.Run() == nil {
+		ms = append(ms, volumeMounter_fuse)
+	}
+
+	if kernelMounterProbe.Run() == nil {
+		ms = append(ms, volumeMounter_kernel)
+	}
+
+	return ms
+}
+
+func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
+	// Get the mounter from the configuration
+	wantMounter := volOptions.Mounter
+	if wantMounter == "" {
+		wantMounter = DefaultVolumeMounter
+	}
+
+	// Verify that it's available
+	availableMounters := getAvailableMounters()
+	if len(availableMounters) == 0 {
+		return nil, fmt.Errorf("no ceph mounters found on system")
+	}
+
+	var chosenMounter string
+
+	for _, availMounter := range availableMounters {
+		if availMounter == wantMounter {
+			chosenMounter = wantMounter
+			break
+		}
+	}
+
+	if chosenMounter == "" {
+		// The configured mounter isn't available - fall back to
+		// whichever one is
+		chosenMounter = availableMounters[0]
+	}
+
+	// Create the mounter
+	switch chosenMounter {
+	case volumeMounter_fuse:
+		return &fuseMounter{}, nil
+	case volumeMounter_kernel:
+		return &kernelMounter{}, nil
+	}
+
+	return nil, fmt.Errorf("unknown mounter '%s'", chosenMounter)
+}
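
Taken together, newMounter resolves the mounter in this order: the
volume's own Mounter option, else DefaultVolumeMounter, and if the
preferred mounter did not probe as available, the first one that did.
A self-contained sketch of just that selection logic (chooseMounter and
its inputs are illustrative, not part of the commit):

package main

import "fmt"

// chooseMounter isolates newMounter's fallback chain: prefer the wanted
// mounter when it is among the probed ones, otherwise fall back to the
// first available mounter; fail when none were found at all.
func chooseMounter(want string, available []string) (string, error) {
	if len(available) == 0 {
		return "", fmt.Errorf("no ceph mounters found on system")
	}

	for _, m := range available {
		if m == want {
			return m, nil
		}
	}

	return available[0], nil
}

func main() {
	fmt.Println(chooseMounter("fuse", []string{"fuse", "kernel"})) // fuse <nil>
	fmt.Println(chooseMounter("kernel", []string{"fuse"}))         // fuse <nil> (fallback)
	fmt.Println(chooseMounter("", nil))                            // "" error
}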