cephfs: added mounter probing and --volumemounter cmd arg

gman 2018-04-13 14:53:17 +02:00
parent b7d856e562
commit 886fdccb9b
2 changed files with 53 additions and 17 deletions

View File

@@ -21,7 +21,8 @@ import (
"os"
"path"
"github.com/ceph/ceph-csi/pkg/cephfs"
// "github.com/ceph/ceph-csi/pkg/cephfs"
"github.com/gman0/ceph-csi/pkg/cephfs"
"github.com/golang/glog"
)
@@ -32,24 +33,25 @@ func init() {
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "csi-cephfsplugin", "name of the driver")
nodeID = flag.String("nodeid", "", "node id")
nodeId = flag.String("nodeid", "", "node id")
volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
)
func main() {
flag.Parse()
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil {
glog.Errorf("failed to create persisten storage for controller %v", err)
glog.Errorf("failed to create persistent storage for controller: %v", err)
os.Exit(1)
}
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil {
glog.Errorf("failed to create persisten storage for node %v", err)
glog.Errorf("failed to create persistent storage for node: %v", err)
os.Exit(1)
}
driver := cephfs.NewCephFSDriver()
driver.Run(*driverName, *nodeID, *endpoint)
driver.Run(*driverName, *nodeId, *endpoint, *volumeMounter)
os.Exit(0)
}
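createPersistentStorage is called in main() but is not part of this diff. A minimal sketch of what it presumably does, assuming it only has to make sure the plugin's state directory exists ("os" is already imported above):

func createPersistentStorage(persistentStoragePath string) error {
	// create the directory (and any missing parents) used to persist plugin state
	return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
}

With the new flag, the plugin can be started with --volumemounter=kernel or --volumemounter=fuse to force a mounter; leaving it empty lets the driver probe for ceph-fuse, as shown in the second file below.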

View File

@@ -17,6 +17,8 @@ limitations under the License.
package cephfs
import (
"os"
"github.com/golang/glog"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
@@ -25,12 +27,13 @@ import (
const (
PluginFolder = "/var/lib/kubelet/plugins/csi-cephfsplugin"
Version = "0.2.0"
)
type cephfsDriver struct {
driver *csicommon.CSIDriver
ids *identityServer
is *identityServer
ns *nodeServer
cs *controllerServer
@@ -40,9 +43,17 @@ type cephfsDriver struct {
var (
driver *cephfsDriver
version = "0.2.0"
DefaultVolumeMounter string
)
func getVolumeMounterByProbing() string {
if execCommandAndValidate("ceph-fuse", "--version") == nil {
return volumeMounter_fuse
} else {
return volumeMounter_kernel
}
}
func NewCephFSDriver() *cephfsDriver {
return &cephfsDriver{}
}
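getVolumeMounterByProbing relies on execCommandAndValidate and on the volumeMounter_fuse / volumeMounter_kernel constants, none of which are shown in this diff. A rough sketch of what they might look like, assuming the constants match the strings accepted by --volumemounter and that probing simply runs the binary and checks for an error:

import (
	"fmt"
	"os/exec"
)

const (
	volumeMounter_fuse   = "fuse"
	volumeMounter_kernel = "kernel"
)

// execCommandAndValidate runs the given program and returns a non-nil error
// if the binary cannot be found or the command exits unsuccessfully.
func execCommandAndValidate(program string, args ...string) error {
	out, err := exec.Command(program, args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("cephfs: %s failed: %v, output: %s", program, err, out)
	}
	return nil
}

Probing ceph-fuse --version this way means FUSE is preferred whenever the ceph-fuse binary is present on the node, with the kernel client as the fallback.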
@@ -65,12 +76,35 @@ func NewNodeServer(d *csicommon.CSIDriver) *nodeServer {
}
}
func (fs *cephfsDriver) Run(driverName, nodeId, endpoint string) {
glog.Infof("Driver: %v version: %v", driverName, version)
func (fs *cephfsDriver) Run(driverName, nodeId, endpoint, volumeMounter string) {
glog.Infof("Driver: %v version: %v", driverName, Version)
// Configuration
if err := os.MkdirAll(volumeCacheRoot, 0755); err != nil {
glog.Fatalf("cephfs: failed to create %s: %v", volumeCacheRoot, err)
return
}
if err := loadVolumeCache(); err != nil {
glog.Errorf("cephfs: failed to read volume cache: %v", err)
}
if volumeMounter != "" {
if err := validateMounter(volumeMounter); err != nil {
glog.Fatalln(err)
} else {
DefaultVolumeMounter = volumeMounter
}
} else {
DefaultVolumeMounter = getVolumeMounterByProbing()
}
glog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)
// Initialize default library driver
fs.driver = csicommon.NewCSIDriver(driverName, version, nodeId)
fs.driver = csicommon.NewCSIDriver(driverName, Version, nodeId)
if fs.driver == nil {
glog.Fatalln("Failed to initialize CSI driver")
}
@@ -85,11 +119,11 @@ func (fs *cephfsDriver) Run(driverName, nodeId, endpoint string) {
// Create gRPC servers
fs.ids = NewIdentityServer(fs.driver)
fs.is = NewIdentityServer(fs.driver)
fs.ns = NewNodeServer(fs.driver)
fs.cs = NewControllerServer(fs.driver)
server := csicommon.NewNonBlockingGRPCServer()
server.Start(endpoint, fs.ids, fs.cs, fs.ns)
server.Start(endpoint, fs.is, fs.cs, fs.ns)
server.Wait()
}
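validateMounter, called from Run above, is also defined outside this diff. A plausible sketch, assuming it only has to reject anything other than the two supported mounters ("fmt" import assumed):

func validateMounter(m string) error {
	switch m {
	case volumeMounter_fuse, volumeMounter_kernel:
		return nil
	default:
		return fmt.Errorf("unknown mounter '%s', valid options are 'fuse' and 'kernel'", m)
	}
}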